[ARM] Change noread to purecode.
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Return the name of the relocation section for NAME: ".rel" NAME when
   the link uses REL relocations, ".rela" NAME otherwise.
   NOTE(review): HTAB is the bfd's link hash table (it carries the
   use_rel flag), not a hash *entry* as the old comment claimed.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
/* Return the size in bytes of one external relocation entry: a REL
   entry when HTAB->use_rel is set, a RELA entry otherwise.
   NOTE(review): HTAB is the bfd's link hash table, not a hash entry.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
44
/* Return the function that swaps a relocation from external to internal
   form, selected by HTAB->use_rel (REL vs. RELA format).
   NOTE(review): HTAB is the bfd's link hash table, not a hash entry.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
51
/* Return the function that swaps a relocation from internal to external
   form, selected by HTAB->use_rel (REL vs. RELA format).
   NOTE(review): HTAB is the bfd's link hash table, not a hash entry.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
58
/* Route info-to-howto conversion through the REL hook only: the generic
   (RELA) hook is disabled by defining it to 0, and the REL hook is
   implemented by elf32_arm_info_to_howto (defined elsewhere in this
   file).  */
#define elf_info_to_howto 0
#define elf_info_to_howto_rel elf32_arm_info_to_howto
61
/* ABI version and OS/ABI identification for ARM ELF objects.
   NOTE(review): presumably written into the e_ident[] bytes of emitted
   ELF headers — confirm at the use sites (not visible in this chunk).  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
/* The Adjusted Place, as defined by AAELF: the place with its two low
   bits cleared, i.e. rounded down to a 4-byte (word) boundary.  */
#define Pa(X) ((X) & ~3u)
67
/* Forward declaration so earlier code in this file can reference the
   section writer; the definition appears later in this file (outside
   this chunk), so its exact behavior is not documented here.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* Relocation type 160 (R_ARM_IRELATIVE) onwards.  Held in a separate
   table because it is not contiguous with the types covered by
   elf32_arm_howto_table_1.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  /* Dynamic relocation used for STT_GNU_IFUNC symbols; the value is
     resolved at load time via the indirect-function mechanism.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 249-255 extended, currently unused, relocations.  This table holds
   the last four of that range: R_ARM_RREL32 through R_ARM_RBASE.  All
   entries are placeholders (zero size, zero masks) kept so that
   elf32_arm_howto_from_type can map the type numbers to a howto.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc, /* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* An entry mapping one of BFD's generic relocation codes onto the
   corresponding ELF R_ARM_* relocation type number.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD's internal reloc code.  */
  unsigned char elf_reloc_val;			/* ELF R_ARM_* number; all
						   mapped types fit in a byte.  */
};
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
1992 static bfd_boolean
1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1994 {
1995 int offset;
1996 size_t size;
1997
1998 switch (note->descsz)
1999 {
2000 default:
2001 return FALSE;
2002
2003 case 148: /* Linux/ARM 32-bit. */
2004 /* pr_cursig */
2005 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2006
2007 /* pr_pid */
2008 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2009
2010 /* pr_reg */
2011 offset = 72;
2012 size = 72;
2013
2014 break;
2015 }
2016
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2019 size, note->descpos + offset);
2020 }
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write out an ARM/Linux core note of type NOTE_TYPE (NT_PRPSINFO or
   NT_PRSTATUS), using the same layouts and field offsets that the grok
   functions above read back.  Variadic arguments supply the note
   contents.  Returns the updated note buffer, or NULL for unhandled
   note types.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];		/* Linux/ARM elf_prpsinfo size.  */
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Program name at offset 28 (16 bytes) and command line at
	   offset 44 (80 bytes), matching elf32_arm_nabi_grok_psinfo.
	   These are fixed-width zero-padded fields, so strncpy's
	   zero-fill and lack of guaranteed NUL termination are
	   intentional here.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];		/* Linux/ARM 32-bit elf_prstatus size.  */
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Offsets match elf32_arm_nabi_grok_prstatus: cursig at 12,
	   pid at 24, 72 bytes of registers at 72.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2105
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2109
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2112
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2114 interworkable. */
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2119
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2126
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2129
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2132
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2135
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2138
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2140
2141 /* The name of the dynamic interpreter. This is put in the .interp
2142 section. */
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2144
/* ARM code fragment: add r0 to LR, load a function pointer from offset
   4 of the result, and branch to it.  NOTE(review): used as a TLS
   trampoline; the exact calling convention is established by the code
   elsewhere in this file that emits it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000, /* add r0, lr, r0 */
  0xe5901004, /* ldr r1, [r0,#4] */
  0xe12fff11, /* bx r1 */
};
2151
/* Lazy TLS-descriptor resolution trampoline: the two trailing data
   words hold GOT-relative offsets (filled in per the comments below)
   which the code uses to load the lazy resolver's address and compute
   the GOT address before branching to the resolver.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT) */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2164
2165 #ifdef FOUR_WORD_PLT
2166
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2170 linker first. */
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2172 {
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2177 };
2178
2179 /* Subsequent entries in a procedure linkage table look like
2180 this. */
2181 static const bfd_vma elf32_arm_plt_entry [] =
2182 {
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2187 };
2188
2189 #else /* not FOUR_WORD_PLT */
2190
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2194 linker first. */
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2196 {
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2202 };
2203
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2207 {
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2211 };
2212
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2216 {
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2221 };
2222
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2224
2225 #endif /* not FOUR_WORD_PLT */
2226
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry [] =
2231 {
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2236 /* add lr, pc */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2239 };
2240
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2242 look like this. */
2243 static const bfd_vma elf32_thumb2_plt_entry [] =
2244 {
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2251 /* nop */
2252 };
2253
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2257 {
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2262 };
2263
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2266 {
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2273 };
2274
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2277 {
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2284 };
2285
/* An initial stub used if the PLT entry is referenced from Thumb code.
   The `bx pc' switches to ARM state so that the ARM-mode PLT entry
   which follows can execute; the nop keeps the stub word-aligned.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778, /* bx pc */
  0x46c0 /* nop */
};
2293
/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  Each entry is a literal-pool load of the final
   target address from the word that immediately follows it, which is
   fixed up with an R_ARM_GLOB_DAT relocation.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
{
  0xe51ff004, /* ldr pc, [pc, #-4] */
  0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
};
2301
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2305 linker first. */
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2307 {
2308 /* First bundle: */
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2318 /* Third bundle: */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2322 /* .Lplt_tail: */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2329 };
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2331
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2334 {
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2339 };
2340
2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2349
/* Classification of each element in a stub template below.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Convenience constructors for insn_sequence entries: X is the
   instruction encoding (or literal data word), Y a relocation type
   and Z a relocation addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction encoding or data
   word, plus the relocation (if any) to apply to it.  */
typedef struct
{
  bfd_vma data;			/* Instruction encoding or literal value.  */
  enum stub_insn_type type;	/* How DATA should be emitted.  */
  unsigned int r_type;		/* ELF relocation applied to this element.  */
  int reloc_addend;		/* Addend for the relocation.  */
} insn_sequence;
2375
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2379 {
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2382 };
2383
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2385 available. */
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2387 {
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2391 };
2392
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2395 {
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2403 };
2404
2405 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2406 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2407 {
2408 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2409 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2410 };
2411
2412 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2413 allowed. */
2414 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2415 {
2416 THUMB16_INSN (0x4778), /* bx pc */
2417 THUMB16_INSN (0x46c0), /* nop */
2418 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2419 ARM_INSN (0xe12fff1c), /* bx ip */
2420 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2421 };
2422
2423 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2424 available. */
2425 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2426 {
2427 THUMB16_INSN (0x4778), /* bx pc */
2428 THUMB16_INSN (0x46c0), /* nop */
2429 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2430 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2431 };
2432
2433 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2434 one, when the destination is close enough. */
2435 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2436 {
2437 THUMB16_INSN (0x4778), /* bx pc */
2438 THUMB16_INSN (0x46c0), /* nop */
2439 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2440 };
2441
2442 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2443 blx to reach the stub if necessary. */
2444 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2445 {
2446 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2447 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2448 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2449 };
2450
2451 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2452 blx to reach the stub if necessary. We can not add into pc;
2453 it is not guaranteed to mode switch (different in ARMv6 and
2454 ARMv7). */
2455 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2456 {
2457 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2458 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2459 ARM_INSN (0xe12fff1c), /* bx ip */
2460 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2461 };
2462
2463 /* V4T ARM -> ARM long branch stub, PIC. */
2464 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2465 {
2466 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2467 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2468 ARM_INSN (0xe12fff1c), /* bx ip */
2469 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2470 };
2471
/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778), /* bx pc */
  THUMB16_INSN (0x46c0), /* nop */
  ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
  ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
2481
2482 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2483 architectures. */
2484 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2485 {
2486 THUMB16_INSN (0xb401), /* push {r0} */
2487 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2488 THUMB16_INSN (0x46fc), /* mov ip, pc */
2489 THUMB16_INSN (0x4484), /* add ip, r0 */
2490 THUMB16_INSN (0xbc01), /* pop {r0} */
2491 THUMB16_INSN (0x4760), /* bx ip */
2492 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2493 };
2494
2495 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2496 allowed. */
2497 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2498 {
2499 THUMB16_INSN (0x4778), /* bx pc */
2500 THUMB16_INSN (0x46c0), /* nop */
2501 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2502 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2503 ARM_INSN (0xe12fff1c), /* bx ip */
2504 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2505 };
2506
2507 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2508 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2509 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2510 {
2511 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2512 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2513 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2514 };
2515
/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778), /* bx pc */
  THUMB16_INSN (0x46c0), /* nop */
  ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
  ARM_INSN (0xe081f00f), /* add pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
};
2526
2527 /* NaCl ARM -> ARM long branch stub. */
2528 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2529 {
2530 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2531 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2532 ARM_INSN (0xe12fff1c), /* bx ip */
2533 ARM_INSN (0xe320f000), /* nop */
2534 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2535 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2536 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2537 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2538 };
2539
2540 /* NaCl ARM -> ARM long branch stub, PIC. */
2541 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2542 {
2543 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2544 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2545 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2546 ARM_INSN (0xe12fff1c), /* bx ip */
2547 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2548 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2549 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2550 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2551 };
2552
2553
2554 /* Cortex-A8 erratum-workaround stubs. */
2555
2556 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2557 can't use a conditional branch to reach this stub). */
2558
2559 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2560 {
2561 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2562 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2563 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2564 };
2565
2566 /* Stub used for b.w and bl.w instructions. */
2567
2568 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2569 {
2570 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2571 };
2572
2573 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2574 {
2575 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2576 };
2577
2578 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2579 instruction (which switches to ARM mode) to point to this stub. Jump to the
2580 real destination using an ARM-mode branch. */
2581
2582 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2583 {
2584 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2585 };
2586
2587 /* For each section group there can be a specially created linker section
2588 to hold the stubs for that group. The name of the stub section is based
2589 upon the name of another section within that group with the suffix below
2590 applied.
2591
2592 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2593 create what appeared to be a linker stub section when it actually
2594 contained user code/data. For example, consider this fragment:
2595
2596 const char * stubborn_problems[] = { "np" };
2597
2598 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2599 section called:
2600
2601 .data.rel.local.stubborn_problems
2602
2603 This then causes problems in arm32_arm_build_stubs() as it triggers:
2604
2605 // Ignore non-stub sections.
2606 if (!strstr (stub_sec->name, STUB_SUFFIX))
2607 continue;
2608
2609 And so the section would be ignored instead of being processed. Hence
2610 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2611 C identifier. */
2612 #define STUB_SUFFIX ".__stub"
2613
2614 /* One entry per long/short branch stub defined above. */
2615 #define DEF_STUBS \
2616 DEF_STUB(long_branch_any_any) \
2617 DEF_STUB(long_branch_v4t_arm_thumb) \
2618 DEF_STUB(long_branch_thumb_only) \
2619 DEF_STUB(long_branch_v4t_thumb_thumb) \
2620 DEF_STUB(long_branch_v4t_thumb_arm) \
2621 DEF_STUB(short_branch_v4t_thumb_arm) \
2622 DEF_STUB(long_branch_any_arm_pic) \
2623 DEF_STUB(long_branch_any_thumb_pic) \
2624 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2625 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2626 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2627 DEF_STUB(long_branch_thumb_only_pic) \
2628 DEF_STUB(long_branch_any_tls_pic) \
2629 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2630 DEF_STUB(long_branch_arm_nacl) \
2631 DEF_STUB(long_branch_arm_nacl_pic) \
2632 DEF_STUB(a8_veneer_b_cond) \
2633 DEF_STUB(a8_veneer_b) \
2634 DEF_STUB(a8_veneer_bl) \
2635 DEF_STUB(a8_veneer_blx) \
2636 DEF_STUB(long_branch_thumb2_only) \
2637
/* Expand the DEF_STUBS X-macro list into an enumeration of stub types.
   arm_stub_none (0) means "no stub required".  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* A stub template and its length in elements.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Expand DEF_STUBS again to build the template table, indexed by
   enum elf32_arm_stub_type; slot 0 corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2662
/* Hash-table entry describing one generated branch/veneer stub.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2714
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Start address of the span this entry describes.  */
  bfd_vma vma;
  /* Mapping-symbol class character -- presumably 'a' (ARM), 't' (Thumb)
     or 'd' (data); TODO confirm against the mapping-symbol reader.  */
  char type;
}
elf32_arm_section_map;
2724
2725 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2726
2727 typedef enum
2728 {
2729 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2730 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2731 VFP11_ERRATUM_ARM_VENEER,
2732 VFP11_ERRATUM_THUMB_VENEER
2733 }
2734 elf32_vfp11_erratum_type;
2735
2736 typedef struct elf32_vfp11_erratum_list
2737 {
2738 struct elf32_vfp11_erratum_list *next;
2739 bfd_vma vma;
2740 union
2741 {
2742 struct
2743 {
2744 struct elf32_vfp11_erratum_list *veneer;
2745 unsigned int vfp_insn;
2746 } b;
2747 struct
2748 {
2749 struct elf32_vfp11_erratum_list *branch;
2750 unsigned int id;
2751 } v;
2752 } u;
2753 elf32_vfp11_erratum_type type;
2754 }
2755 elf32_vfp11_erratum_list;
2756
2757 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2758 veneer. */
2759 typedef enum
2760 {
2761 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2762 STM32L4XX_ERRATUM_VENEER
2763 }
2764 elf32_stm32l4xx_erratum_type;
2765
2766 typedef struct elf32_stm32l4xx_erratum_list
2767 {
2768 struct elf32_stm32l4xx_erratum_list *next;
2769 bfd_vma vma;
2770 union
2771 {
2772 struct
2773 {
2774 struct elf32_stm32l4xx_erratum_list *veneer;
2775 unsigned int insn;
2776 } b;
2777 struct
2778 {
2779 struct elf32_stm32l4xx_erratum_list *branch;
2780 unsigned int id;
2781 } v;
2782 } u;
2783 elf32_stm32l4xx_erratum_type type;
2784 }
2785 elf32_stm32l4xx_erratum_list;
2786
2787 typedef enum
2788 {
2789 DELETE_EXIDX_ENTRY,
2790 INSERT_EXIDX_CANTUNWIND_AT_END
2791 }
2792 arm_unwind_edit_type;
2793
2794 /* A (sorted) list of edits to apply to an unwind table. */
2795 typedef struct arm_unwind_table_edit
2796 {
2797 arm_unwind_edit_type type;
2798 /* Note: we sometimes want to insert an unwind entry corresponding to a
2799 section different from the one we're currently writing out, so record the
2800 (text) section this edit relates to here. */
2801 asection *linked_section;
2802 unsigned int index;
2803 struct arm_unwind_table_edit *next;
2804 }
2805 arm_unwind_table_edit;
2806
2807 typedef struct _arm_elf_section_data
2808 {
2809 /* Information about mapping symbols. */
2810 struct bfd_elf_section_data elf;
2811 unsigned int mapcount;
2812 unsigned int mapsize;
2813 elf32_arm_section_map *map;
2814 /* Information about CPU errata. */
2815 unsigned int erratumcount;
2816 elf32_vfp11_erratum_list *erratumlist;
2817 unsigned int stm32l4xx_erratumcount;
2818 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2819 unsigned int additional_reloc_count;
2820 /* Information about unwind tables. */
2821 union
2822 {
2823 /* Unwind info attached to a text section. */
2824 struct
2825 {
2826 asection *arm_exidx_sec;
2827 } text;
2828
2829 /* Unwind info attached to an .ARM.exidx section. */
2830 struct
2831 {
2832 arm_unwind_table_edit *unwind_edit_list;
2833 arm_unwind_table_edit *unwind_edit_tail;
2834 } exidx;
2835 } u;
2836 }
2837 _arm_elf_section_data;
2838
2839 #define elf32_arm_section_data(sec) \
2840 ((_arm_elf_section_data *) elf_section_data (sec))
2841
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* The input BFD and section containing the branch to be fixed.  */
  bfd *input_bfd;
  asection *section;
  /* Offset of the branch within SECTION, and the offset of its target.  */
  bfd_vma offset;
  bfd_vma target_offset;
  /* The original branch instruction, before any fix is applied.  */
  unsigned long orig_insn;
  /* Name of the stub generated for this fix.  */
  char *stub_name;
  /* Kind of veneer to use, and the ARM/Thumb mode of the branch target.  */
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};
2859
/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Address the branch is taken from, and its destination.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash table entry for the branch target (global symbols), and its
     name.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* The relocation type applied to the branch, and the ARM/Thumb mode
     of its target.  */
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* Whether a stub unrelated to the Cortex-A8 erratum is involved —
     presumably set when such a stub already covers this branch;
     NOTE(review): confirm against the uses in elf32_arm_size_stubs.  */
  bfd_boolean non_a8_stub;
};

/* The size of the thread control block.  */
#define TCB_SIZE 8
2876
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  This is -1 until an entry is
     assigned (see elf32_arm_link_hash_newfunc).  */
  bfd_signed_vma got_offset;
};
2901
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Allocated lazily by elf32_arm_create_local_iplt.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2916
/* ARM-specific per-object (per input BFD) data.  The local_* arrays
   below are allocated as a single block by
   elf32_arm_allocate_local_sym_info and are indexed by local symbol
   number.  */
struct elf_arm_obj_tdata
{
  /* The generic per-object data this structure extends.  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
2936
/* Get the ARM-specific tdata attached to BFD.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

/* Per-local-symbol array of GOT tls_type values for BFD.  */
#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

/* Per-local-symbol array of TLS descriptor GOTPLT entries for BFD.  */
#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

/* Per-local-symbol array of .iplt information for BFD.  */
#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

/* Nonzero if BFD is an ELF object using the ARM backend's tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
2953
2954 static bfd_boolean
2955 elf32_arm_mkobject (bfd *abfd)
2956 {
2957 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2958 ARM_ELF_DATA);
2959 }
2960
/* Cast a generic ELF linker hash entry to an ARM-specific one.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic entry this structure extends.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Kinds of GOT entry a symbol may need; tls_type below holds a mask of
   these bits.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  /* Mask of the GOT_* values above.  */
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Unused bits.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2999
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL if the hash table is not an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up an entry in the stub hash table, cast to the ARM-specific
   stub entry type.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
3015
/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the GOTPLT jump table: one 4-byte slot per
   R_ARM_TLS_DESC slot allocated so far (see next_tls_desc_index).  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3029
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3191
/* Return the number of trailing (low-order) zero bits in MASK.  The
   fallback implementation returns the bit width of MASK when no bit is
   set at all; __builtin_ctz is undefined for a zero argument.  */

static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  /* Count how many right-shifts it takes before the low bit is set.  */
  unsigned int count = 0;

  while (count < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3209
/* Return the number of set bits in MASK.  */

static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int sum = 0;

  /* Kernighan's method: clearing the lowest set bit on each iteration
     means we loop once per set bit rather than once per bit position.  */
  while (mask)
    {
      mask &= mask - 1;
      sum++;
    }
  return sum;
#endif
}
3227
3228 /* Create an entry in an ARM ELF linker hash table. */
3229
3230 static struct bfd_hash_entry *
3231 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3232 struct bfd_hash_table * table,
3233 const char * string)
3234 {
3235 struct elf32_arm_link_hash_entry * ret =
3236 (struct elf32_arm_link_hash_entry *) entry;
3237
3238 /* Allocate the structure if it has not already been allocated by a
3239 subclass. */
3240 if (ret == NULL)
3241 ret = (struct elf32_arm_link_hash_entry *)
3242 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3243 if (ret == NULL)
3244 return (struct bfd_hash_entry *) ret;
3245
3246 /* Call the allocation method of the superclass. */
3247 ret = ((struct elf32_arm_link_hash_entry *)
3248 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3249 table, string));
3250 if (ret != NULL)
3251 {
3252 ret->dyn_relocs = NULL;
3253 ret->tls_type = GOT_UNKNOWN;
3254 ret->tlsdesc_got = (bfd_vma) -1;
3255 ret->plt.thumb_refcount = 0;
3256 ret->plt.maybe_thumb_refcount = 0;
3257 ret->plt.noncall_refcount = 0;
3258 ret->plt.got_offset = -1;
3259 ret->is_iplt = FALSE;
3260 ret->export_glue = NULL;
3261
3262 ret->stub_cache = NULL;
3263 }
3264
3265 return (struct bfd_hash_entry *) ret;
3266 }
3267
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Return FALSE only if allocation fails.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  /* The GOT refcount array doubles as the "already allocated" flag.  */
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* One slot per local symbol in each of the four arrays below.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* Carve the single zeroed allocation up into the four per-symbol
	 arrays.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3302
3303 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3304 to input bfd ABFD. Create the information if it doesn't already exist.
3305 Return null if an allocation fails. */
3306
3307 static struct arm_local_iplt_info *
3308 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3309 {
3310 struct arm_local_iplt_info **ptr;
3311
3312 if (!elf32_arm_allocate_local_sym_info (abfd))
3313 return NULL;
3314
3315 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3316 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3317 if (*ptr == NULL)
3318 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3319 return *ptr;
3320 }
3321
3322 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3323 in ABFD's symbol table. If the symbol is global, H points to its
3324 hash table entry, otherwise H is null.
3325
3326 Return true if the symbol does have PLT information. When returning
3327 true, point *ROOT_PLT at the target-independent reference count/offset
3328 union and *ARM_PLT at the ARM-specific information. */
3329
3330 static bfd_boolean
3331 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3332 unsigned long r_symndx, union gotplt_union **root_plt,
3333 struct arm_plt_info **arm_plt)
3334 {
3335 struct arm_local_iplt_info *local_iplt;
3336
3337 if (h != NULL)
3338 {
3339 *root_plt = &h->root.plt;
3340 *arm_plt = &h->plt;
3341 return TRUE;
3342 }
3343
3344 if (elf32_arm_local_iplt (abfd) == NULL)
3345 return FALSE;
3346
3347 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3348 if (local_iplt == NULL)
3349 return FALSE;
3350
3351 *root_plt = &local_iplt->root;
3352 *arm_plt = &local_iplt->arm;
3353 return TRUE;
3354 }
3355
3356 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3357 before it. */
3358
3359 static bfd_boolean
3360 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3361 struct arm_plt_info *arm_plt)
3362 {
3363 struct elf32_arm_link_hash_table *htab;
3364
3365 htab = elf32_arm_hash_table (info);
3366 return (arm_plt->thumb_refcount != 0
3367 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3368 }
3369
/* Return a pointer to the head of the dynamic reloc list that should
   be used for local symbol ISYM, which is symbol number R_SYMNDX in
   ABFD's symbol table.  Return null if an error occurs.  */

static struct elf_dyn_relocs **
elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
				   Elf_Internal_Sym *isym)
{
  if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
    {
      struct arm_local_iplt_info *local_iplt;

      /* IFUNC symbols keep their dynamic relocs with their per-symbol
	 .iplt information, created on demand.  */
      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
      if (local_iplt == NULL)
	return NULL;
      return &local_iplt->dyn_relocs;
    }
  else
    {
      /* Track dynamic relocs needed for local syms too.
	 We really need local syms available to do this
	 easily.  Oh well.  */
      asection *s;
      void *vpp;

      /* The section the symbol is defined in must exist by now.  */
      s = bfd_section_from_elf_index (abfd, isym->st_shndx);
      if (s == NULL)
	abort ();

      vpp = &elf_section_data (s)->local_dynrel;
      return (struct elf_dyn_relocs **) vpp;
    }
}
3403
3404 /* Initialize an entry in the stub hash table. */
3405
3406 static struct bfd_hash_entry *
3407 stub_hash_newfunc (struct bfd_hash_entry *entry,
3408 struct bfd_hash_table *table,
3409 const char *string)
3410 {
3411 /* Allocate the structure if it has not already been allocated by a
3412 subclass. */
3413 if (entry == NULL)
3414 {
3415 entry = (struct bfd_hash_entry *)
3416 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3417 if (entry == NULL)
3418 return entry;
3419 }
3420
3421 /* Call the allocation method of the superclass. */
3422 entry = bfd_hash_newfunc (entry, table, string);
3423 if (entry != NULL)
3424 {
3425 struct elf32_arm_stub_hash_entry *eh;
3426
3427 /* Initialize the local fields. */
3428 eh = (struct elf32_arm_stub_hash_entry *) entry;
3429 eh->stub_sec = NULL;
3430 eh->stub_offset = 0;
3431 eh->source_value = 0;
3432 eh->target_value = 0;
3433 eh->target_section = NULL;
3434 eh->orig_insn = 0;
3435 eh->stub_type = arm_stub_none;
3436 eh->stub_size = 0;
3437 eh->stub_template = NULL;
3438 eh->stub_template_size = 0;
3439 eh->h = NULL;
3440 eh->id_sec = NULL;
3441 eh->output_name = NULL;
3442 }
3443
3444 return entry;
3445 }
3446
3447 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3448 shortcuts to them in our hash table. */
3449
3450 static bfd_boolean
3451 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3452 {
3453 struct elf32_arm_link_hash_table *htab;
3454
3455 htab = elf32_arm_hash_table (info);
3456 if (htab == NULL)
3457 return FALSE;
3458
3459 /* BPABI objects never have a GOT, or associated sections. */
3460 if (htab->symbian_p)
3461 return TRUE;
3462
3463 if (! _bfd_elf_create_got_section (dynobj, info))
3464 return FALSE;
3465
3466 return TRUE;
3467 }
3468
3469 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3470
3471 static bfd_boolean
3472 create_ifunc_sections (struct bfd_link_info *info)
3473 {
3474 struct elf32_arm_link_hash_table *htab;
3475 const struct elf_backend_data *bed;
3476 bfd *dynobj;
3477 asection *s;
3478 flagword flags;
3479
3480 htab = elf32_arm_hash_table (info);
3481 dynobj = htab->root.dynobj;
3482 bed = get_elf_backend_data (dynobj);
3483 flags = bed->dynamic_sec_flags;
3484
3485 if (htab->root.iplt == NULL)
3486 {
3487 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3488 flags | SEC_READONLY | SEC_CODE);
3489 if (s == NULL
3490 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3491 return FALSE;
3492 htab->root.iplt = s;
3493 }
3494
3495 if (htab->root.irelplt == NULL)
3496 {
3497 s = bfd_make_section_anyway_with_flags (dynobj,
3498 RELOC_SECTION (htab, ".iplt"),
3499 flags | SEC_READONLY);
3500 if (s == NULL
3501 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3502 return FALSE;
3503 htab->root.irelplt = s;
3504 }
3505
3506 if (htab->root.igotplt == NULL)
3507 {
3508 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3509 if (s == NULL
3510 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3511 return FALSE;
3512 htab->root.igotplt = s;
3513 }
3514 return TRUE;
3515 }
3516
/* Determine if we're dealing with a Thumb only architecture.  */

static bfd_boolean
using_thumb_only (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					  Tag_CPU_arch_profile);

  /* The 'M' (microcontroller) profile supports only Thumb.  */
  if (profile)
    return profile == 'M';

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
	      || arch == TAG_CPU_ARCH_V8M_BASE
	      || arch == TAG_CPU_ARCH_V8M_MAIN);

  /* No profile attribute: fall back on the architecture revisions that
     have no ARM execution state.  */
  if (arch == TAG_CPU_ARCH_V6_M
      || arch == TAG_CPU_ARCH_V6S_M
      || arch == TAG_CPU_ARCH_V7E_M
      || arch == TAG_CPU_ARCH_V8M_BASE
      || arch == TAG_CPU_ARCH_V8M_MAIN)
    return TRUE;

  return FALSE;
}
3545
/* Determine if we're dealing with a Thumb-2 object.  */

static bfd_boolean
using_thumb2 (struct elf32_arm_link_hash_table *globals)
{
  int arch;
  int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
					    Tag_THUMB_ISA_use);

  /* Tag_THUMB_ISA_use value 2 means 32-bit Thumb instructions are
     permitted (i.e. Thumb-2).  */
  if (thumb_isa)
    return thumb_isa == 2;

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* Force return logic to be reviewed for each new architecture.  */
  BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
	      || arch == TAG_CPU_ARCH_V8M_BASE
	      || arch == TAG_CPU_ARCH_V8M_MAIN);

  /* No Tag_THUMB_ISA_use attribute: infer from the architecture
     revisions that include Thumb-2.  */
  return (arch == TAG_CPU_ARCH_V6T2
	  || arch == TAG_CPU_ARCH_V7
	  || arch == TAG_CPU_ARCH_V7E_M
	  || arch == TAG_CPU_ARCH_V8
	  || arch == TAG_CPU_ARCH_V8M_MAIN);
}
3571
3572 /* Determine whether Thumb-2 BL instruction is available. */
3573
3574 static bfd_boolean
3575 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3576 {
3577 int arch =
3578 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3579
3580 /* Force return logic to be reviewed for each new architecture. */
3581 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3582 || arch == TAG_CPU_ARCH_V8M_BASE
3583 || arch == TAG_CPU_ARCH_V8M_MAIN);
3584
3585 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3586 return (arch == TAG_CPU_ARCH_V6T2
3587 || arch >= TAG_CPU_ARCH_V7);
3588 }
3589
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Cache the sections the generic code just created.  .rel(a).bss is
     only looked up for non-PIC links.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      if (bfd_link_pic (info))
	{
	  /* Shared VxWorks PLTs have no initial (header) entry.  */
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute look-ups at DYNOBJ; restore
	 afterwards.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* All of these should have been created by the calls above.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3661
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   symbol IND is being folded into symbol DIR.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Merged: unlink P from EIND's list.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Splice what remains of EIND's list onto EDIR's.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take over IND's TLS typing if DIR has no GOT entry of its
	 own yet.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3728
3729 /* Destroy an ARM elf linker hash table. */
3730
3731 static void
3732 elf32_arm_link_hash_table_free (bfd *obfd)
3733 {
3734 struct elf32_arm_link_hash_table *ret
3735 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3736
3737 bfd_hash_table_free (&ret->stub_hash_table);
3738 _bfd_elf_link_hash_table_free (obfd);
3739 }
3740
/* Create an ARM elf linker hash table.  Returns NULL on allocation or
   initialisation failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  /* Initialise defaults.  */
  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  /* Make sure the stub hash table is freed along with the rest.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3784
3785 /* Determine what kind of NOPs are available. */
3786
3787 static bfd_boolean
3788 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3789 {
3790 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3791 Tag_CPU_arch);
3792
3793 /* Force return logic to be reviewed for each new architecture. */
3794 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3795 || arch == TAG_CPU_ARCH_V8M_BASE
3796 || arch == TAG_CPU_ARCH_V8M_MAIN);
3797
3798 return (arch == TAG_CPU_ARCH_V6T2
3799 || arch == TAG_CPU_ARCH_V6K
3800 || arch == TAG_CPU_ARCH_V7
3801 || arch == TAG_CPU_ARCH_V8);
3802 }
3803
3804 static bfd_boolean
3805 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3806 {
3807 switch (stub_type)
3808 {
3809 case arm_stub_long_branch_thumb_only:
3810 case arm_stub_long_branch_thumb2_only:
3811 case arm_stub_long_branch_v4t_thumb_arm:
3812 case arm_stub_short_branch_v4t_thumb_arm:
3813 case arm_stub_long_branch_v4t_thumb_arm_pic:
3814 case arm_stub_long_branch_v4t_thumb_tls_pic:
3815 case arm_stub_long_branch_thumb_only_pic:
3816 return TRUE;
3817 case arm_stub_none:
3818 BFD_FAIL ();
3819 return FALSE;
3820 break;
3821 default:
3822 return FALSE;
3823 }
3824 }
3825
3826 /* Determine the type of stub needed, if any, for a call. */
3827
3828 static enum elf32_arm_stub_type
3829 arm_type_of_stub (struct bfd_link_info *info,
3830 asection *input_sec,
3831 const Elf_Internal_Rela *rel,
3832 unsigned char st_type,
3833 enum arm_st_branch_type *actual_branch_type,
3834 struct elf32_arm_link_hash_entry *hash,
3835 bfd_vma destination,
3836 asection *sym_sec,
3837 bfd *input_bfd,
3838 const char *name)
3839 {
3840 bfd_vma location;
3841 bfd_signed_vma branch_offset;
3842 unsigned int r_type;
3843 struct elf32_arm_link_hash_table * globals;
3844 bfd_boolean thumb2, thumb2_bl, thumb_only;
3845 enum elf32_arm_stub_type stub_type = arm_stub_none;
3846 int use_plt = 0;
3847 enum arm_st_branch_type branch_type = *actual_branch_type;
3848 union gotplt_union *root_plt;
3849 struct arm_plt_info *arm_plt;
3850
3851 if (branch_type == ST_BRANCH_LONG)
3852 return stub_type;
3853
3854 globals = elf32_arm_hash_table (info);
3855 if (globals == NULL)
3856 return stub_type;
3857
3858 thumb_only = using_thumb_only (globals);
3859 thumb2 = using_thumb2 (globals);
3860 thumb2_bl = using_thumb2_bl (globals);
3861
3862 /* Determine where the call point is. */
3863 location = (input_sec->output_offset
3864 + input_sec->output_section->vma
3865 + rel->r_offset);
3866
3867 r_type = ELF32_R_TYPE (rel->r_info);
3868
3869 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3870 are considering a function call relocation. */
3871 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3872 || r_type == R_ARM_THM_JUMP19)
3873 && branch_type == ST_BRANCH_TO_ARM)
3874 branch_type = ST_BRANCH_TO_THUMB;
3875
3876 /* For TLS call relocs, it is the caller's responsibility to provide
3877 the address of the appropriate trampoline. */
3878 if (r_type != R_ARM_TLS_CALL
3879 && r_type != R_ARM_THM_TLS_CALL
3880 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3881 &root_plt, &arm_plt)
3882 && root_plt->offset != (bfd_vma) -1)
3883 {
3884 asection *splt;
3885
3886 if (hash == NULL || hash->is_iplt)
3887 splt = globals->root.iplt;
3888 else
3889 splt = globals->root.splt;
3890 if (splt != NULL)
3891 {
3892 use_plt = 1;
3893
3894 /* Note when dealing with PLT entries: the main PLT stub is in
3895 ARM mode, so if the branch is in Thumb mode, another
3896 Thumb->ARM stub will be inserted later just before the ARM
3897 PLT stub. We don't take this extra distance into account
3898 here, because if a long branch stub is needed, we'll add a
3899 Thumb->Arm one and branch directly to the ARM PLT entry
3900 because it avoids spreading offset corrections in several
3901 places. */
3902
3903 destination = (splt->output_section->vma
3904 + splt->output_offset
3905 + root_plt->offset);
3906 st_type = STT_FUNC;
3907 branch_type = ST_BRANCH_TO_ARM;
3908 }
3909 }
3910 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3911 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3912
3913 branch_offset = (bfd_signed_vma)(destination - location);
3914
3915 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3916 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3917 {
3918 /* Handle cases where:
3919 - this call goes too far (different Thumb/Thumb2 max
3920 distance)
3921 - it's a Thumb->Arm call and blx is not available, or it's a
3922 Thumb->Arm branch (not bl). A stub is needed in this case,
3923 but only if this call is not through a PLT entry. Indeed,
3924 PLT stubs handle mode switching already.
3925 */
3926 if ((!thumb2_bl
3927 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3928 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3929 || (thumb2_bl
3930 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3931 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3932 || (thumb2
3933 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3934 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3935 && (r_type == R_ARM_THM_JUMP19))
3936 || (branch_type == ST_BRANCH_TO_ARM
3937 && (((r_type == R_ARM_THM_CALL
3938 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3939 || (r_type == R_ARM_THM_JUMP24)
3940 || (r_type == R_ARM_THM_JUMP19))
3941 && !use_plt))
3942 {
3943 if (branch_type == ST_BRANCH_TO_THUMB)
3944 {
3945 /* Thumb to thumb. */
3946 if (!thumb_only)
3947 {
3948 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3949 /* PIC stubs. */
3950 ? ((globals->use_blx
3951 && (r_type == R_ARM_THM_CALL))
3952 /* V5T and above. Stub starts with ARM code, so
3953 we must be able to switch mode before
3954 reaching it, which is only possible for 'bl'
3955 (ie R_ARM_THM_CALL relocation). */
3956 ? arm_stub_long_branch_any_thumb_pic
3957 /* On V4T, use Thumb code only. */
3958 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3959
3960 /* non-PIC stubs. */
3961 : ((globals->use_blx
3962 && (r_type == R_ARM_THM_CALL))
3963 /* V5T and above. */
3964 ? arm_stub_long_branch_any_any
3965 /* V4T. */
3966 : arm_stub_long_branch_v4t_thumb_thumb);
3967 }
3968 else
3969 {
3970 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3971 /* PIC stub. */
3972 ? arm_stub_long_branch_thumb_only_pic
3973 /* non-PIC stub. */
3974 : (thumb2 ? arm_stub_long_branch_thumb2_only
3975 : arm_stub_long_branch_thumb_only);
3976 }
3977 }
3978 else
3979 {
3980 /* Thumb to arm. */
3981 if (sym_sec != NULL
3982 && sym_sec->owner != NULL
3983 && !INTERWORK_FLAG (sym_sec->owner))
3984 {
3985 (*_bfd_error_handler)
3986 (_("%B(%s): warning: interworking not enabled.\n"
3987 " first occurrence: %B: Thumb call to ARM"),
3988 sym_sec->owner, input_bfd, name);
3989 }
3990
3991 stub_type =
3992 (bfd_link_pic (info) | globals->pic_veneer)
3993 /* PIC stubs. */
3994 ? (r_type == R_ARM_THM_TLS_CALL
3995 /* TLS PIC stubs. */
3996 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3997 : arm_stub_long_branch_v4t_thumb_tls_pic)
3998 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3999 /* V5T PIC and above. */
4000 ? arm_stub_long_branch_any_arm_pic
4001 /* V4T PIC stub. */
4002 : arm_stub_long_branch_v4t_thumb_arm_pic))
4003
4004 /* non-PIC stubs. */
4005 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4006 /* V5T and above. */
4007 ? arm_stub_long_branch_any_any
4008 /* V4T. */
4009 : arm_stub_long_branch_v4t_thumb_arm);
4010
4011 /* Handle v4t short branches. */
4012 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4013 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4014 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4015 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4016 }
4017 }
4018 }
4019 else if (r_type == R_ARM_CALL
4020 || r_type == R_ARM_JUMP24
4021 || r_type == R_ARM_PLT32
4022 || r_type == R_ARM_TLS_CALL)
4023 {
4024 if (branch_type == ST_BRANCH_TO_THUMB)
4025 {
4026 /* Arm to thumb. */
4027
4028 if (sym_sec != NULL
4029 && sym_sec->owner != NULL
4030 && !INTERWORK_FLAG (sym_sec->owner))
4031 {
4032 (*_bfd_error_handler)
4033 (_("%B(%s): warning: interworking not enabled.\n"
4034 " first occurrence: %B: ARM call to Thumb"),
4035 sym_sec->owner, input_bfd, name);
4036 }
4037
4038 /* We have an extra 2-bytes reach because of
4039 the mode change (bit 24 (H) of BLX encoding). */
4040 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4041 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4042 || (r_type == R_ARM_CALL && !globals->use_blx)
4043 || (r_type == R_ARM_JUMP24)
4044 || (r_type == R_ARM_PLT32))
4045 {
4046 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4047 /* PIC stubs. */
4048 ? ((globals->use_blx)
4049 /* V5T and above. */
4050 ? arm_stub_long_branch_any_thumb_pic
4051 /* V4T stub. */
4052 : arm_stub_long_branch_v4t_arm_thumb_pic)
4053
4054 /* non-PIC stubs. */
4055 : ((globals->use_blx)
4056 /* V5T and above. */
4057 ? arm_stub_long_branch_any_any
4058 /* V4T. */
4059 : arm_stub_long_branch_v4t_arm_thumb);
4060 }
4061 }
4062 else
4063 {
4064 /* Arm to arm. */
4065 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4066 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4067 {
4068 stub_type =
4069 (bfd_link_pic (info) | globals->pic_veneer)
4070 /* PIC stubs. */
4071 ? (r_type == R_ARM_TLS_CALL
4072 /* TLS PIC Stub. */
4073 ? arm_stub_long_branch_any_tls_pic
4074 : (globals->nacl_p
4075 ? arm_stub_long_branch_arm_nacl_pic
4076 : arm_stub_long_branch_any_arm_pic))
4077 /* non-PIC stubs. */
4078 : (globals->nacl_p
4079 ? arm_stub_long_branch_arm_nacl
4080 : arm_stub_long_branch_any_any);
4081 }
4082 }
4083 }
4084
4085 /* If a stub is needed, record the actual destination type. */
4086 if (stub_type != arm_stub_none)
4087 *actual_branch_type = branch_type;
4088
4089 return stub_type;
4090 }
4091
4092 /* Build a name for an entry in the stub hash table. */
4093
4094 static char *
4095 elf32_arm_stub_name (const asection *input_section,
4096 const asection *sym_sec,
4097 const struct elf32_arm_link_hash_entry *hash,
4098 const Elf_Internal_Rela *rel,
4099 enum elf32_arm_stub_type stub_type)
4100 {
4101 char *stub_name;
4102 bfd_size_type len;
4103
4104 if (hash)
4105 {
4106 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4107 stub_name = (char *) bfd_malloc (len);
4108 if (stub_name != NULL)
4109 sprintf (stub_name, "%08x_%s+%x_%d",
4110 input_section->id & 0xffffffff,
4111 hash->root.root.root.string,
4112 (int) rel->r_addend & 0xffffffff,
4113 (int) stub_type);
4114 }
4115 else
4116 {
4117 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4118 stub_name = (char *) bfd_malloc (len);
4119 if (stub_name != NULL)
4120 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4121 input_section->id & 0xffffffff,
4122 sym_sec->id & 0xffffffff,
4123 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4124 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4125 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4126 (int) rel->r_addend & 0xffffffff,
4127 (int) stub_type);
4128 }
4129
4130 return stub_name;
4131 }
4132
4133 /* Look up an entry in the stub hash. Stub entries are cached because
4134 creating the stub name takes a bit of time. */
4135
4136 static struct elf32_arm_stub_hash_entry *
4137 elf32_arm_get_stub_entry (const asection *input_section,
4138 const asection *sym_sec,
4139 struct elf_link_hash_entry *hash,
4140 const Elf_Internal_Rela *rel,
4141 struct elf32_arm_link_hash_table *htab,
4142 enum elf32_arm_stub_type stub_type)
4143 {
4144 struct elf32_arm_stub_hash_entry *stub_entry;
4145 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4146 const asection *id_sec;
4147
4148 if ((input_section->flags & SEC_CODE) == 0)
4149 return NULL;
4150
4151 /* If this input section is part of a group of sections sharing one
4152 stub section, then use the id of the first section in the group.
4153 Stub names need to include a section id, as there may well be
4154 more than one stub used to reach say, printf, and we need to
4155 distinguish between them. */
4156 id_sec = htab->stub_group[input_section->id].link_sec;
4157
4158 if (h != NULL && h->stub_cache != NULL
4159 && h->stub_cache->h == h
4160 && h->stub_cache->id_sec == id_sec
4161 && h->stub_cache->stub_type == stub_type)
4162 {
4163 stub_entry = h->stub_cache;
4164 }
4165 else
4166 {
4167 char *stub_name;
4168
4169 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4170 if (stub_name == NULL)
4171 return NULL;
4172
4173 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4174 stub_name, FALSE, FALSE);
4175 if (h != NULL)
4176 h->stub_cache = stub_entry;
4177
4178 free (stub_name);
4179 }
4180
4181 return stub_entry;
4182 }
4183
4184 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4185 section. */
4186
4187 static bfd_boolean
4188 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4189 {
4190 if (stub_type >= max_stub_type)
4191 abort (); /* Should be unreachable. */
4192
4193 return FALSE;
4194 }
4195
4196 /* Required alignment (as a power of 2) for the dedicated section holding
4197 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4198 with input sections. */
4199
4200 static int
4201 arm_dedicated_stub_output_section_required_alignment
4202 (enum elf32_arm_stub_type stub_type)
4203 {
4204 if (stub_type >= max_stub_type)
4205 abort (); /* Should be unreachable. */
4206
4207 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4208 return 0;
4209 }
4210
4211 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4212 NULL if veneers of this type are interspersed with input sections. */
4213
4214 static const char *
4215 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4216 {
4217 if (stub_type >= max_stub_type)
4218 abort (); /* Should be unreachable. */
4219
4220 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4221 return NULL;
4222 }
4223
4224 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4225 returns the address of the hash table field in HTAB holding a pointer to the
4226 corresponding input section. Otherwise, returns NULL. */
4227
4228 static asection **
4229 arm_dedicated_stub_input_section_ptr
4230 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4231 enum elf32_arm_stub_type stub_type)
4232 {
4233 if (stub_type >= max_stub_type)
4234 abort (); /* Should be unreachable. */
4235
4236 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4237 return NULL;
4238 }
4239
4240 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4241 is the section that branch into veneer and can be NULL if stub should go in
4242 a dedicated output section. Returns a pointer to the stub section, and the
4243 section to which the stub section will be attached (in *LINK_SEC_P).
4244 LINK_SEC_P may be NULL. */
4245
static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Veneers of this type live in their own output section, which
	 must already exist in the output BFD — we only look it up here
	 and error out if it is missing.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
				   "section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Interspersed veneers: attach the stub section to the stub-group
	 leader recorded for SECTION.  Fall back to the leader's own
	 stub_sec slot when SECTION has none yet, so all members of the
	 group share one stub section.  */
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      /* Alignment as a power of two: 16 bytes for NaCl, 8 otherwise.  */
      align = htab->nacl_p ? 4 : 3;
    }

  /* Create the stub section on first use: its name is the prefix chosen
     above plus STUB_SUFFIX, allocated on the stub BFD.  */
  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      /* Stubs are code the output must keep and load.  */
      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the (possibly shared) stub section directly on SECTION's own
     group entry so later lookups hit it immediately.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4318
4319 /* Add a new stub entry to the stub hash. Not all fields of the new
4320 stub entry are initialised. */
4321
4322 static struct elf32_arm_stub_hash_entry *
4323 elf32_arm_add_stub (const char *stub_name, asection *section,
4324 struct elf32_arm_link_hash_table *htab,
4325 enum elf32_arm_stub_type stub_type)
4326 {
4327 asection *link_sec;
4328 asection *stub_sec;
4329 struct elf32_arm_stub_hash_entry *stub_entry;
4330
4331 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4332 stub_type);
4333 if (stub_sec == NULL)
4334 return NULL;
4335
4336 /* Enter this entry into the linker stub hash table. */
4337 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4338 TRUE, FALSE);
4339 if (stub_entry == NULL)
4340 {
4341 if (section == NULL)
4342 section = stub_sec;
4343 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4344 section->owner,
4345 stub_name);
4346 return NULL;
4347 }
4348
4349 stub_entry->stub_sec = stub_sec;
4350 stub_entry->stub_offset = 0;
4351 stub_entry->id_sec = link_sec;
4352
4353 return stub_entry;
4354 }
4355
4356 /* Store an Arm insn into an output section not processed by
4357 elf32_arm_write_section. */
4358
4359 static void
4360 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4361 bfd * output_bfd, bfd_vma val, void * ptr)
4362 {
4363 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4364 bfd_putl32 (val, ptr);
4365 else
4366 bfd_putb32 (val, ptr);
4367 }
4368
4369 /* Store a 16-bit Thumb insn into an output section not processed by
4370 elf32_arm_write_section. */
4371
4372 static void
4373 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4374 bfd * output_bfd, bfd_vma val, void * ptr)
4375 {
4376 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4377 bfd_putl16 (val, ptr);
4378 else
4379 bfd_putb16 (val, ptr);
4380 }
4381
4382 /* Store a Thumb2 insn into an output section not processed by
4383 elf32_arm_write_section. */
4384
4385 static void
4386 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4387 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4388 {
4389 /* T2 instructions are 16-bit streamed. */
4390 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4391 {
4392 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4393 bfd_putl16 ((val & 0xffff), ptr + 2);
4394 }
4395 else
4396 {
4397 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4398 bfd_putb16 ((val & 0xffff), ptr + 2);
4399 }
4400 }
4401
4402 /* If it's possible to change R_TYPE to a more efficient access
4403 model, return the new reloc type. */
4404
4405 static unsigned
4406 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4407 struct elf_link_hash_entry *h)
4408 {
4409 int is_local = (h == NULL);
4410
4411 if (bfd_link_pic (info)
4412 || (h && h->root.type == bfd_link_hash_undefweak))
4413 return r_type;
4414
4415 /* We do not support relaxations for Old TLS models. */
4416 switch (r_type)
4417 {
4418 case R_ARM_TLS_GOTDESC:
4419 case R_ARM_TLS_CALL:
4420 case R_ARM_THM_TLS_CALL:
4421 case R_ARM_TLS_DESCSEQ:
4422 case R_ARM_THM_TLS_DESCSEQ:
4423 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4424 }
4425
4426 return r_type;
4427 }
4428
4429 static bfd_reloc_status_type elf32_arm_final_link_relocate
4430 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4431 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4432 const char *, unsigned char, enum arm_st_branch_type,
4433 struct elf_link_hash_entry *, bfd_boolean *, char **);
4434
4435 static unsigned int
4436 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4437 {
4438 switch (stub_type)
4439 {
4440 case arm_stub_a8_veneer_b_cond:
4441 case arm_stub_a8_veneer_b:
4442 case arm_stub_a8_veneer_bl:
4443 return 2;
4444
4445 case arm_stub_long_branch_any_any:
4446 case arm_stub_long_branch_v4t_arm_thumb:
4447 case arm_stub_long_branch_thumb_only:
4448 case arm_stub_long_branch_thumb2_only:
4449 case arm_stub_long_branch_v4t_thumb_thumb:
4450 case arm_stub_long_branch_v4t_thumb_arm:
4451 case arm_stub_short_branch_v4t_thumb_arm:
4452 case arm_stub_long_branch_any_arm_pic:
4453 case arm_stub_long_branch_any_thumb_pic:
4454 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4455 case arm_stub_long_branch_v4t_arm_thumb_pic:
4456 case arm_stub_long_branch_v4t_thumb_arm_pic:
4457 case arm_stub_long_branch_thumb_only_pic:
4458 case arm_stub_long_branch_any_tls_pic:
4459 case arm_stub_long_branch_v4t_thumb_tls_pic:
4460 case arm_stub_a8_veneer_blx:
4461 return 4;
4462
4463 case arm_stub_long_branch_arm_nacl:
4464 case arm_stub_long_branch_arm_nacl_pic:
4465 return 16;
4466
4467 default:
4468 abort (); /* Should be unreachable. */
4469 }
4470 }
4471
4472 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4473 veneering (TRUE) or have their own symbol (FALSE). */
4474
4475 static bfd_boolean
4476 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4477 {
4478 if (stub_type >= max_stub_type)
4479 abort (); /* Should be unreachable. */
4480
4481 return FALSE;
4482 }
4483
4484 /* Returns the padding needed for the dedicated section used stubs of type
4485 STUB_TYPE. */
4486
4487 static int
4488 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4489 {
4490 if (stub_type >= max_stub_type)
4491 abort (); /* Should be unreachable. */
4492
4493 return 0;
4494 }
4495
/* Emit the instructions of one stub into its stub section and apply the
   stub template's relocations.  GEN_ENTRY is the stub hash entry and
   IN_ARG the bfd_link_info — the signature matches a hash-table
   traversal callback (presumably invoked over the stub hash table; the
   traversal call is outside this view).  Returns FALSE on hard failure,
   TRUE otherwise (including when the entry is skipped for this pass).  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* Only the first two elements are listed; C zero-initialises the
     rest.  Access is bounded by nrelocs, so the third slot is always
     written before being read.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* Two-pass build: 2-byte-aligned (Cortex-A8) stubs are built in a
     different pass from the rest, selected by the sign of
     fix_cortex_a8.  Skip entries that belong to the other pass.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template element, recording which elements carry a
     relocation (index and byte offset) for the fixup loop below.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* 32-bit Thumb insn: two halfwords, most significant first.  */
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal pool word: always relocated.  */
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub.  Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  /* Apply each recorded relocation against the stub's destination
     (plus the template's per-element addend).  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
4655
4656 /* Calculate the template, template size and instruction size for a stub.
4657 Return value is the instruction size. */
4658
4659 static unsigned int
4660 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4661 const insn_sequence **stub_template,
4662 int *stub_template_size)
4663 {
4664 const insn_sequence *template_sequence = NULL;
4665 int template_size = 0, i;
4666 unsigned int size;
4667
4668 template_sequence = stub_definitions[stub_type].template_sequence;
4669 if (stub_template)
4670 *stub_template = template_sequence;
4671
4672 template_size = stub_definitions[stub_type].template_size;
4673 if (stub_template_size)
4674 *stub_template_size = template_size;
4675
4676 size = 0;
4677 for (i = 0; i < template_size; i++)
4678 {
4679 switch (template_sequence[i].type)
4680 {
4681 case THUMB16_TYPE:
4682 size += 2;
4683 break;
4684
4685 case ARM_TYPE:
4686 case THUMB32_TYPE:
4687 case DATA_TYPE:
4688 size += 4;
4689 break;
4690
4691 default:
4692 BFD_FAIL ();
4693 return 0;
4694 }
4695 }
4696
4697 return size;
4698 }
4699
4700 /* As above, but don't actually build the stub. Just bump offset so
4701 we know stub section sizes. */
4702
4703 static bfd_boolean
4704 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4705 void *in_arg ATTRIBUTE_UNUSED)
4706 {
4707 struct elf32_arm_stub_hash_entry *stub_entry;
4708 const insn_sequence *template_sequence;
4709 int template_size, size;
4710
4711 /* Massage our args to the form they really have. */
4712 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4713
4714 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4715 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4716
4717 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4718 &template_size);
4719
4720 stub_entry->stub_size = size;
4721 stub_entry->stub_template = template_sequence;
4722 stub_entry->stub_template_size = template_size;
4723
4724 size = (size + 7) & ~7;
4725 stub_entry->stub_sec->size += size;
4726
4727 return TRUE;
4728 }
4729
4730 /* External entry points for sizing and building linker stubs. */
4731
4732 /* Set up various things so that we can make a list of input sections
4733 for each output section included in the link. Returns -1 on error,
4734 0 when no stubs will be needed, and 1 on success. */
4735
4736 int
4737 elf32_arm_setup_section_lists (bfd *output_bfd,
4738 struct bfd_link_info *info)
4739 {
4740 bfd *input_bfd;
4741 unsigned int bfd_count;
4742 unsigned int top_id, top_index;
4743 asection *section;
4744 asection **input_list, **list;
4745 bfd_size_type amt;
4746 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4747
4748 if (htab == NULL)
4749 return 0;
4750 if (! is_elf_hash_table (htab))
4751 return 0;
4752
4753 /* Count the number of input BFDs and find the top input section id. */
4754 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4755 input_bfd != NULL;
4756 input_bfd = input_bfd->link.next)
4757 {
4758 bfd_count += 1;
4759 for (section = input_bfd->sections;
4760 section != NULL;
4761 section = section->next)
4762 {
4763 if (top_id < section->id)
4764 top_id = section->id;
4765 }
4766 }
4767 htab->bfd_count = bfd_count;
4768
4769 amt = sizeof (struct map_stub) * (top_id + 1);
4770 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4771 if (htab->stub_group == NULL)
4772 return -1;
4773 htab->top_id = top_id;
4774
4775 /* We can't use output_bfd->section_count here to find the top output
4776 section index as some sections may have been removed, and
4777 _bfd_strip_section_from_output doesn't renumber the indices. */
4778 for (section = output_bfd->sections, top_index = 0;
4779 section != NULL;
4780 section = section->next)
4781 {
4782 if (top_index < section->index)
4783 top_index = section->index;
4784 }
4785
4786 htab->top_index = top_index;
4787 amt = sizeof (asection *) * (top_index + 1);
4788 input_list = (asection **) bfd_malloc (amt);
4789 htab->input_list = input_list;
4790 if (input_list == NULL)
4791 return -1;
4792
4793 /* For sections we aren't interested in, mark their entries with a
4794 value we can check later. */
4795 list = input_list + top_index;
4796 do
4797 *list = bfd_abs_section_ptr;
4798 while (list-- != input_list);
4799
4800 for (section = output_bfd->sections;
4801 section != NULL;
4802 section = section->next)
4803 {
4804 if ((section->flags & SEC_CODE) != 0)
4805 input_list[section->index] = NULL;
4806 }
4807
4808 return 1;
4809 }
4810
4811 /* The linker repeatedly calls this function for each input section,
4812 in the order that input sections are linked into output sections.
4813 Build lists of input sections to determine groupings between which
4814 we may insert linker stubs. */
4815
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  /* Only output sections seen by elf32_arm_setup_section_lists have a
     valid input_list entry; anything marked bfd_abs_section_ptr there
     (non-code output sections) is ignored.  */
  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	  /* NOTE: PREV_SEC deliberately stays defined here; group_sections
	     below reuses it and performs the #undef.  */
	}
    }
}
4840
4841 /* See whether we can group stub sections together. Grouping stub
4842 sections may result in fewer stubs. More importantly, we need to
4843 put all .init* and .fini* stubs at the end of the .init or
4844 .fini output sections respectively, because glibc splits the
4845 _init and _fini functions into multiple parts. Putting a stub in
4846 the middle of a function is not a good idea. */
4847
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  /* Each input_list entry is the reversed chain of code input sections
     feeding one output section, built by elf32_arm_next_input_section
     and threaded through the stub_group link_sec fields (PREV_SEC).
     Entries still equal to bfd_abs_section_ptr are uninteresting.  */
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Walk the (now forward-ordered) list, carving it into groups no
	 wider than stub_group_size and recording each group's leader
	 (CURR, the section the stubs will follow) in link_sec.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  /* Continue with the first section beyond this group.  */
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section list heads are no longer needed; the group
     assignments now live in stub_group[].link_sec.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4941
4942 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4943 erratum fix. */
4944
4945 static int
4946 a8_reloc_compare (const void *a, const void *b)
4947 {
4948 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4949 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4950
4951 if (ra->from < rb->from)
4952 return -1;
4953 else if (ra->from > rb->from)
4954 return 1;
4955 else
4956 return 0;
4957 }
4958
4959 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4960 const char *, char **);
4961
4962 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4963 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4964 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4965 otherwise. */
4966
static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  /* Work on local copies of the in/out table parameters; the (possibly
     reallocated) table and the updated counts are written back through
     the pointer parameters before returning.  */
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only executable PROGBITS sections that actually make it into the
	 output can contain affected branches; skip everything else.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached section contents if available, otherwise read them
	 from the input bfd.  Note the TRUE return here signals failure.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the spans delimited by the section's mapping symbols.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  /* Only Thumb code ('t' mapping symbol) can contain the
	     erratum-triggering 32-bit Thumb-2 branches.  */
	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	     first half of the branch.
	     * The instruction before the branch is a 32-bit
	     length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      /* NOTE(review): instructions are fetched little-endian here
		 regardless of target byte order — presumably correct for
		 the targets this scan supports; confirm for BE configs.  */
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword of a 32-bit Thumb-2 instruction has
		 bits [15:13] == 0b111 and bits [12:11] != 0b00.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* A candidate is a 32-bit branch whose first halfword sits at
		 the last halfword of a 4KB page (offset 0xffe), preceded by
		 a 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation recorded at this address during the
		     earlier reloc walk (A8_RELOCS is sorted by `from').  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      /* A Thumb call through the PLT lands in ARM code, so
			 treat a PLT-bound call like a branch to ARM.  */
		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) immediate and
			 sign-extend from bit 20.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX (T4/T1/T2) immediate: I1/I2 are
			 derived from J1/J2 XOR S, then sign-extend from
			 bit 24.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      /* BLX targets are 4-byte aligned.  */
		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only branches back into the same 4KB page as the
			 first halfword of the branch trigger the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Grow the fix table when full.  NOTE(review):
			     this bfd_realloc result is not checked; a
			     failed reallocation would be dereferenced
			     below.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* Name is "<section id hex>:<offset hex>";
				 8+1+8+1 covers two 32-bit hex values, the
				 colon and the terminating NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Only free the contents if we allocated them ourselves above.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Publish the (possibly reallocated) table and updated counts.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5272
5273 /* Create or update a stub entry depending on whether the stub can already be
5274 found in HTAB. The stub is identified by:
5275 - its type STUB_TYPE
5276 - its source branch (note that several can share the same stub) whose
5277 section and relocation (if any) are given by SECTION and IRELA
5278 respectively
5279 - its target symbol whose input section, hash, name, value and branch type
5280 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5281 respectively
5282
5283 If found, the value of the stub's target symbol is updated from SYM_VALUE
5284 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5285 TRUE and the stub entry is initialized.
5286
5287 Returns whether the stub could be successfully created or updated, or FALSE
5288 if an error occured. */
5289
5290 static bfd_boolean
5291 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5292 enum elf32_arm_stub_type stub_type, asection *section,
5293 Elf_Internal_Rela *irela, asection *sym_sec,
5294 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5295 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5296 bfd_boolean *new_stub)
5297 {
5298 const asection *id_sec;
5299 char *stub_name;
5300 struct elf32_arm_stub_hash_entry *stub_entry;
5301 unsigned int r_type;
5302 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5303
5304 BFD_ASSERT (stub_type != arm_stub_none);
5305 *new_stub = FALSE;
5306
5307 if (sym_claimed)
5308 stub_name = sym_name;
5309 else
5310 {
5311 BFD_ASSERT (irela);
5312 BFD_ASSERT (section);
5313
5314 /* Support for grouping stub sections. */
5315 id_sec = htab->stub_group[section->id].link_sec;
5316
5317 /* Get the name of this stub. */
5318 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5319 stub_type);
5320 if (!stub_name)
5321 return FALSE;
5322 }
5323
5324 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5325 FALSE);
5326 /* The proper stub has already been created, just update its value. */
5327 if (stub_entry != NULL)
5328 {
5329 if (!sym_claimed)
5330 free (stub_name);
5331 stub_entry->target_value = sym_value;
5332 return TRUE;
5333 }
5334
5335 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5336 if (stub_entry == NULL)
5337 {
5338 if (!sym_claimed)
5339 free (stub_name);
5340 return FALSE;
5341 }
5342
5343 stub_entry->target_value = sym_value;
5344 stub_entry->target_section = sym_sec;
5345 stub_entry->stub_type = stub_type;
5346 stub_entry->h = hash;
5347 stub_entry->branch_type = branch_type;
5348
5349 if (sym_claimed)
5350 stub_entry->output_name = sym_name;
5351 else
5352 {
5353 if (sym_name == NULL)
5354 sym_name = "unnamed";
5355 stub_entry->output_name = (char *)
5356 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5357 + strlen (sym_name));
5358 if (stub_entry->output_name == NULL)
5359 {
5360 free (stub_name);
5361 return FALSE;
5362 }
5363
5364 /* For historical reasons, use the existing names for ARM-to-Thumb and
5365 Thumb-to-ARM stubs. */
5366 r_type = ELF32_R_TYPE (irela->r_info);
5367 if ((r_type == (unsigned int) R_ARM_THM_CALL
5368 || r_type == (unsigned int) R_ARM_THM_JUMP24
5369 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5370 && branch_type == ST_BRANCH_TO_ARM)
5371 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5372 else if ((r_type == (unsigned int) R_ARM_CALL
5373 || r_type == (unsigned int) R_ARM_JUMP24)
5374 && branch_type == ST_BRANCH_TO_THUMB)
5375 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5376 else
5377 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5378 }
5379
5380 *new_stub = TRUE;
5381 return TRUE;
5382 }
5383
5384 /* Determine and set the size of the stub section for a final link.
5385
5386 The basic idea here is to examine all the relocations looking for
5387 PC-relative calls to a target that is unreachable with a "bl"
5388 instruction. */
5389
5390 bfd_boolean
5391 elf32_arm_size_stubs (bfd *output_bfd,
5392 bfd *stub_bfd,
5393 struct bfd_link_info *info,
5394 bfd_signed_vma group_size,
5395 asection * (*add_stub_section) (const char *, asection *,
5396 asection *,
5397 unsigned int),
5398 void (*layout_sections_again) (void))
5399 {
5400 bfd_size_type stub_group_size;
5401 bfd_boolean stubs_always_after_branch;
5402 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5403 struct a8_erratum_fix *a8_fixes = NULL;
5404 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5405 struct a8_erratum_reloc *a8_relocs = NULL;
5406 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5407
5408 if (htab == NULL)
5409 return FALSE;
5410
5411 if (htab->fix_cortex_a8)
5412 {
5413 a8_fixes = (struct a8_erratum_fix *)
5414 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5415 a8_relocs = (struct a8_erratum_reloc *)
5416 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5417 }
5418
5419 /* Propagate mach to stub bfd, because it may not have been
5420 finalized when we created stub_bfd. */
5421 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5422 bfd_get_mach (output_bfd));
5423
5424 /* Stash our params away. */
5425 htab->stub_bfd = stub_bfd;
5426 htab->add_stub_section = add_stub_section;
5427 htab->layout_sections_again = layout_sections_again;
5428 stubs_always_after_branch = group_size < 0;
5429
5430 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5431 as the first half of a 32-bit branch straddling two 4K pages. This is a
5432 crude way of enforcing that. */
5433 if (htab->fix_cortex_a8)
5434 stubs_always_after_branch = 1;
5435
5436 if (group_size < 0)
5437 stub_group_size = -group_size;
5438 else
5439 stub_group_size = group_size;
5440
5441 if (stub_group_size == 1)
5442 {
5443 /* Default values. */
5444 /* Thumb branch range is +-4MB has to be used as the default
5445 maximum size (a given section can contain both ARM and Thumb
5446 code, so the worst case has to be taken into account).
5447
5448 This value is 24K less than that, which allows for 2025
5449 12-byte stubs. If we exceed that, then we will fail to link.
5450 The user will have to relink with an explicit group size
5451 option. */
5452 stub_group_size = 4170000;
5453 }
5454
5455 group_sections (htab, stub_group_size, stubs_always_after_branch);
5456
5457 /* If we're applying the cortex A8 fix, we need to determine the
5458 program header size now, because we cannot change it later --
5459 that could alter section placements. Notice the A8 erratum fix
5460 ends up requiring the section addresses to remain unchanged
5461 modulo the page size. That's something we cannot represent
5462 inside BFD, and we don't want to force the section alignment to
5463 be the page size. */
5464 if (htab->fix_cortex_a8)
5465 (*htab->layout_sections_again) ();
5466
5467 while (1)
5468 {
5469 bfd *input_bfd;
5470 unsigned int bfd_indx;
5471 asection *stub_sec;
5472 enum elf32_arm_stub_type stub_type;
5473 bfd_boolean stub_changed = FALSE;
5474 unsigned prev_num_a8_fixes = num_a8_fixes;
5475
5476 num_a8_fixes = 0;
5477 for (input_bfd = info->input_bfds, bfd_indx = 0;
5478 input_bfd != NULL;
5479 input_bfd = input_bfd->link.next, bfd_indx++)
5480 {
5481 Elf_Internal_Shdr *symtab_hdr;
5482 asection *section;
5483 Elf_Internal_Sym *local_syms = NULL;
5484
5485 if (!is_arm_elf (input_bfd))
5486 continue;
5487
5488 num_a8_relocs = 0;
5489
5490 /* We'll need the symbol table in a second. */
5491 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5492 if (symtab_hdr->sh_info == 0)
5493 continue;
5494
5495 /* Walk over each section attached to the input bfd. */
5496 for (section = input_bfd->sections;
5497 section != NULL;
5498 section = section->next)
5499 {
5500 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5501
5502 /* If there aren't any relocs, then there's nothing more
5503 to do. */
5504 if ((section->flags & SEC_RELOC) == 0
5505 || section->reloc_count == 0
5506 || (section->flags & SEC_CODE) == 0)
5507 continue;
5508
5509 /* If this section is a link-once section that will be
5510 discarded, then don't create any stubs. */
5511 if (section->output_section == NULL
5512 || section->output_section->owner != output_bfd)
5513 continue;
5514
5515 /* Get the relocs. */
5516 internal_relocs
5517 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5518 NULL, info->keep_memory);
5519 if (internal_relocs == NULL)
5520 goto error_ret_free_local;
5521
5522 /* Now examine each relocation. */
5523 irela = internal_relocs;
5524 irelaend = irela + section->reloc_count;
5525 for (; irela < irelaend; irela++)
5526 {
5527 unsigned int r_type, r_indx;
5528 asection *sym_sec;
5529 bfd_vma sym_value;
5530 bfd_vma destination;
5531 struct elf32_arm_link_hash_entry *hash;
5532 const char *sym_name;
5533 unsigned char st_type;
5534 enum arm_st_branch_type branch_type;
5535 bfd_boolean created_stub = FALSE;
5536
5537 r_type = ELF32_R_TYPE (irela->r_info);
5538 r_indx = ELF32_R_SYM (irela->r_info);
5539
5540 if (r_type >= (unsigned int) R_ARM_max)
5541 {
5542 bfd_set_error (bfd_error_bad_value);
5543 error_ret_free_internal:
5544 if (elf_section_data (section)->relocs == NULL)
5545 free (internal_relocs);
5546 /* Fall through. */
5547 error_ret_free_local:
5548 if (local_syms != NULL
5549 && (symtab_hdr->contents
5550 != (unsigned char *) local_syms))
5551 free (local_syms);
5552 return FALSE;
5553 }
5554
5555 hash = NULL;
5556 if (r_indx >= symtab_hdr->sh_info)
5557 hash = elf32_arm_hash_entry
5558 (elf_sym_hashes (input_bfd)
5559 [r_indx - symtab_hdr->sh_info]);
5560
5561 /* Only look for stubs on branch instructions, or
5562 non-relaxed TLSCALL */
5563 if ((r_type != (unsigned int) R_ARM_CALL)
5564 && (r_type != (unsigned int) R_ARM_THM_CALL)
5565 && (r_type != (unsigned int) R_ARM_JUMP24)
5566 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5567 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5568 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5569 && (r_type != (unsigned int) R_ARM_PLT32)
5570 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5571 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5572 && r_type == elf32_arm_tls_transition
5573 (info, r_type, &hash->root)
5574 && ((hash ? hash->tls_type
5575 : (elf32_arm_local_got_tls_type
5576 (input_bfd)[r_indx]))
5577 & GOT_TLS_GDESC) != 0))
5578 continue;
5579
5580 /* Now determine the call target, its name, value,
5581 section. */
5582 sym_sec = NULL;
5583 sym_value = 0;
5584 destination = 0;
5585 sym_name = NULL;
5586
5587 if (r_type == (unsigned int) R_ARM_TLS_CALL
5588 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5589 {
5590 /* A non-relaxed TLS call. The target is the
5591 plt-resident trampoline and nothing to do
5592 with the symbol. */
5593 BFD_ASSERT (htab->tls_trampoline > 0);
5594 sym_sec = htab->root.splt;
5595 sym_value = htab->tls_trampoline;
5596 hash = 0;
5597 st_type = STT_FUNC;
5598 branch_type = ST_BRANCH_TO_ARM;
5599 }
5600 else if (!hash)
5601 {
5602 /* It's a local symbol. */
5603 Elf_Internal_Sym *sym;
5604
5605 if (local_syms == NULL)
5606 {
5607 local_syms
5608 = (Elf_Internal_Sym *) symtab_hdr->contents;
5609 if (local_syms == NULL)
5610 local_syms
5611 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5612 symtab_hdr->sh_info, 0,
5613 NULL, NULL, NULL);
5614 if (local_syms == NULL)
5615 goto error_ret_free_internal;
5616 }
5617
5618 sym = local_syms + r_indx;
5619 if (sym->st_shndx == SHN_UNDEF)
5620 sym_sec = bfd_und_section_ptr;
5621 else if (sym->st_shndx == SHN_ABS)
5622 sym_sec = bfd_abs_section_ptr;
5623 else if (sym->st_shndx == SHN_COMMON)
5624 sym_sec = bfd_com_section_ptr;
5625 else
5626 sym_sec =
5627 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5628
5629 if (!sym_sec)
5630 /* This is an undefined symbol. It can never
5631 be resolved. */
5632 continue;
5633
5634 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5635 sym_value = sym->st_value;
5636 destination = (sym_value + irela->r_addend
5637 + sym_sec->output_offset
5638 + sym_sec->output_section->vma);
5639 st_type = ELF_ST_TYPE (sym->st_info);
5640 branch_type =
5641 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5642 sym_name
5643 = bfd_elf_string_from_elf_section (input_bfd,
5644 symtab_hdr->sh_link,
5645 sym->st_name);
5646 }
5647 else
5648 {
5649 /* It's an external symbol. */
5650 while (hash->root.root.type == bfd_link_hash_indirect
5651 || hash->root.root.type == bfd_link_hash_warning)
5652 hash = ((struct elf32_arm_link_hash_entry *)
5653 hash->root.root.u.i.link);
5654
5655 if (hash->root.root.type == bfd_link_hash_defined
5656 || hash->root.root.type == bfd_link_hash_defweak)
5657 {
5658 sym_sec = hash->root.root.u.def.section;
5659 sym_value = hash->root.root.u.def.value;
5660
5661 struct elf32_arm_link_hash_table *globals =
5662 elf32_arm_hash_table (info);
5663
5664 /* For a destination in a shared library,
5665 use the PLT stub as target address to
5666 decide whether a branch stub is
5667 needed. */
5668 if (globals != NULL
5669 && globals->root.splt != NULL
5670 && hash != NULL
5671 && hash->root.plt.offset != (bfd_vma) -1)
5672 {
5673 sym_sec = globals->root.splt;
5674 sym_value = hash->root.plt.offset;
5675 if (sym_sec->output_section != NULL)
5676 destination = (sym_value
5677 + sym_sec->output_offset
5678 + sym_sec->output_section->vma);
5679 }
5680 else if (sym_sec->output_section != NULL)
5681 destination = (sym_value + irela->r_addend
5682 + sym_sec->output_offset
5683 + sym_sec->output_section->vma);
5684 }
5685 else if ((hash->root.root.type == bfd_link_hash_undefined)
5686 || (hash->root.root.type == bfd_link_hash_undefweak))
5687 {
5688 /* For a shared library, use the PLT stub as
5689 target address to decide whether a long
5690 branch stub is needed.
5691 For absolute code, they cannot be handled. */
5692 struct elf32_arm_link_hash_table *globals =
5693 elf32_arm_hash_table (info);
5694
5695 if (globals != NULL
5696 && globals->root.splt != NULL
5697 && hash != NULL
5698 && hash->root.plt.offset != (bfd_vma) -1)
5699 {
5700 sym_sec = globals->root.splt;
5701 sym_value = hash->root.plt.offset;
5702 if (sym_sec->output_section != NULL)
5703 destination = (sym_value
5704 + sym_sec->output_offset
5705 + sym_sec->output_section->vma);
5706 }
5707 else
5708 continue;
5709 }
5710 else
5711 {
5712 bfd_set_error (bfd_error_bad_value);
5713 goto error_ret_free_internal;
5714 }
5715 st_type = hash->root.type;
5716 branch_type =
5717 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5718 sym_name = hash->root.root.root.string;
5719 }
5720
5721 do
5722 {
5723 bfd_boolean new_stub;
5724
5725 /* Determine what (if any) linker stub is needed. */
5726 stub_type = arm_type_of_stub (info, section, irela,
5727 st_type, &branch_type,
5728 hash, destination, sym_sec,
5729 input_bfd, sym_name);
5730 if (stub_type == arm_stub_none)
5731 break;
5732
5733 /* We've either created a stub for this reloc already,
5734 or we are about to. */
5735 created_stub =
5736 elf32_arm_create_stub (htab, stub_type, section, irela,
5737 sym_sec, hash,
5738 (char *) sym_name, sym_value,
5739 branch_type, &new_stub);
5740
5741 if (!created_stub)
5742 goto error_ret_free_internal;
5743 else if (!new_stub)
5744 break;
5745 else
5746 stub_changed = TRUE;
5747 }
5748 while (0);
5749
5750 /* Look for relocations which might trigger Cortex-A8
5751 erratum. */
5752 if (htab->fix_cortex_a8
5753 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5754 || r_type == (unsigned int) R_ARM_THM_JUMP19
5755 || r_type == (unsigned int) R_ARM_THM_CALL
5756 || r_type == (unsigned int) R_ARM_THM_XPC22))
5757 {
5758 bfd_vma from = section->output_section->vma
5759 + section->output_offset
5760 + irela->r_offset;
5761
5762 if ((from & 0xfff) == 0xffe)
5763 {
5764 /* Found a candidate. Note we haven't checked the
5765 destination is within 4K here: if we do so (and
5766 don't create an entry in a8_relocs) we can't tell
5767 that a branch should have been relocated when
5768 scanning later. */
5769 if (num_a8_relocs == a8_reloc_table_size)
5770 {
5771 a8_reloc_table_size *= 2;
5772 a8_relocs = (struct a8_erratum_reloc *)
5773 bfd_realloc (a8_relocs,
5774 sizeof (struct a8_erratum_reloc)
5775 * a8_reloc_table_size);
5776 }
5777
5778 a8_relocs[num_a8_relocs].from = from;
5779 a8_relocs[num_a8_relocs].destination = destination;
5780 a8_relocs[num_a8_relocs].r_type = r_type;
5781 a8_relocs[num_a8_relocs].branch_type = branch_type;
5782 a8_relocs[num_a8_relocs].sym_name = sym_name;
5783 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5784 a8_relocs[num_a8_relocs].hash = hash;
5785
5786 num_a8_relocs++;
5787 }
5788 }
5789 }
5790
5791 /* We're done with the internal relocs, free them. */
5792 if (elf_section_data (section)->relocs == NULL)
5793 free (internal_relocs);
5794 }
5795
5796 if (htab->fix_cortex_a8)
5797 {
5798 /* Sort relocs which might apply to Cortex-A8 erratum. */
5799 qsort (a8_relocs, num_a8_relocs,
5800 sizeof (struct a8_erratum_reloc),
5801 &a8_reloc_compare);
5802
5803 /* Scan for branches which might trigger Cortex-A8 erratum. */
5804 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5805 &num_a8_fixes, &a8_fix_table_size,
5806 a8_relocs, num_a8_relocs,
5807 prev_num_a8_fixes, &stub_changed)
5808 != 0)
5809 goto error_ret_free_local;
5810 }
5811
5812 if (local_syms != NULL
5813 && symtab_hdr->contents != (unsigned char *) local_syms)
5814 {
5815 if (!info->keep_memory)
5816 free (local_syms);
5817 else
5818 symtab_hdr->contents = (unsigned char *) local_syms;
5819 }
5820 }
5821
5822 if (prev_num_a8_fixes != num_a8_fixes)
5823 stub_changed = TRUE;
5824
5825 if (!stub_changed)
5826 break;
5827
5828 /* OK, we've added some stubs. Find out the new size of the
5829 stub sections. */
5830 for (stub_sec = htab->stub_bfd->sections;
5831 stub_sec != NULL;
5832 stub_sec = stub_sec->next)
5833 {
5834 /* Ignore non-stub sections. */
5835 if (!strstr (stub_sec->name, STUB_SUFFIX))
5836 continue;
5837
5838 stub_sec->size = 0;
5839 }
5840
5841 /* Compute stub section size, considering padding. */
5842 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5843 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5844 stub_type++)
5845 {
5846 int size, padding;
5847 asection **stub_sec_p;
5848
5849 padding = arm_dedicated_stub_section_padding (stub_type);
5850 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5851 /* Skip if no stub input section or no stub section padding
5852 required. */
5853 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5854 continue;
5855 /* Stub section padding required but no dedicated section. */
5856 BFD_ASSERT (stub_sec_p);
5857
5858 size = (*stub_sec_p)->size;
5859 size = (size + padding - 1) & ~(padding - 1);
5860 (*stub_sec_p)->size = size;
5861 }
5862
5863 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5864 if (htab->fix_cortex_a8)
5865 for (i = 0; i < num_a8_fixes; i++)
5866 {
5867 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5868 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5869
5870 if (stub_sec == NULL)
5871 return FALSE;
5872
5873 stub_sec->size
5874 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5875 NULL);
5876 }
5877
5878
5879 /* Ask the linker to do its stuff. */
5880 (*htab->layout_sections_again) ();
5881 }
5882
5883 /* Add stubs for Cortex-A8 erratum fixes now. */
5884 if (htab->fix_cortex_a8)
5885 {
5886 for (i = 0; i < num_a8_fixes; i++)
5887 {
5888 struct elf32_arm_stub_hash_entry *stub_entry;
5889 char *stub_name = a8_fixes[i].stub_name;
5890 asection *section = a8_fixes[i].section;
5891 unsigned int section_id = a8_fixes[i].section->id;
5892 asection *link_sec = htab->stub_group[section_id].link_sec;
5893 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5894 const insn_sequence *template_sequence;
5895 int template_size, size = 0;
5896
5897 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5898 TRUE, FALSE);
5899 if (stub_entry == NULL)
5900 {
5901 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5902 section->owner,
5903 stub_name);
5904 return FALSE;
5905 }
5906
5907 stub_entry->stub_sec = stub_sec;
5908 stub_entry->stub_offset = 0;
5909 stub_entry->id_sec = link_sec;
5910 stub_entry->stub_type = a8_fixes[i].stub_type;
5911 stub_entry->source_value = a8_fixes[i].offset;
5912 stub_entry->target_section = a8_fixes[i].section;
5913 stub_entry->target_value = a8_fixes[i].target_offset;
5914 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5915 stub_entry->branch_type = a8_fixes[i].branch_type;
5916
5917 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5918 &template_sequence,
5919 &template_size);
5920
5921 stub_entry->stub_size = size;
5922 stub_entry->stub_template = template_sequence;
5923 stub_entry->stub_template_size = template_size;
5924 }
5925
5926 /* Stash the Cortex-A8 erratum fix array for use later in
5927 elf32_arm_write_section(). */
5928 htab->a8_erratum_fixes = a8_fixes;
5929 htab->num_a8_erratum_fixes = num_a8_fixes;
5930 }
5931 else
5932 {
5933 htab->a8_erratum_fixes = NULL;
5934 htab->num_a8_erratum_fixes = 0;
5935 }
5936 return TRUE;
5937 }
5938
5939 /* Build all the stubs associated with the current output file. The
5940 stubs are kept in a hash table attached to the main linker hash
5941 table. We also set up the .plt entries for statically linked PIC
5942 functions here. This function is called via arm_elf_finish in the
5943 linker. */
5944
5945 bfd_boolean
5946 elf32_arm_build_stubs (struct bfd_link_info *info)
5947 {
5948 asection *stub_sec;
5949 struct bfd_hash_table *table;
5950 struct elf32_arm_link_hash_table *htab;
5951
5952 htab = elf32_arm_hash_table (info);
5953 if (htab == NULL)
5954 return FALSE;
5955
5956 for (stub_sec = htab->stub_bfd->sections;
5957 stub_sec != NULL;
5958 stub_sec = stub_sec->next)
5959 {
5960 bfd_size_type size;
5961
5962 /* Ignore non-stub sections. */
5963 if (!strstr (stub_sec->name, STUB_SUFFIX))
5964 continue;
5965
5966 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
5967 must at least be done for stub section requiring padding. */
5968 size = stub_sec->size;
5969 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5970 if (stub_sec->contents == NULL && size != 0)
5971 return FALSE;
5972 stub_sec->size = 0;
5973 }
5974
5975 /* Build the stubs as directed by the stub hash table. */
5976 table = &htab->stub_hash_table;
5977 bfd_hash_traverse (table, arm_build_one_stub, info);
5978 if (htab->fix_cortex_a8)
5979 {
5980 /* Place the cortex a8 stubs last. */
5981 htab->fix_cortex_a8 = -1;
5982 bfd_hash_traverse (table, arm_build_one_stub, info);
5983 }
5984
5985 return TRUE;
5986 }
5987
5988 /* Locate the Thumb encoded calling stub for NAME. */
5989
5990 static struct elf_link_hash_entry *
5991 find_thumb_glue (struct bfd_link_info *link_info,
5992 const char *name,
5993 char **error_message)
5994 {
5995 char *tmp_name;
5996 struct elf_link_hash_entry *hash;
5997 struct elf32_arm_link_hash_table *hash_table;
5998
5999 /* We need a pointer to the armelf specific hash table. */
6000 hash_table = elf32_arm_hash_table (link_info);
6001 if (hash_table == NULL)
6002 return NULL;
6003
6004 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6005 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
6006
6007 BFD_ASSERT (tmp_name);
6008
6009 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
6010
6011 hash = elf_link_hash_lookup
6012 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6013
6014 if (hash == NULL
6015 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
6016 tmp_name, name) == -1)
6017 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6018
6019 free (tmp_name);
6020
6021 return hash;
6022 }
6023
6024 /* Locate the ARM encoded calling stub for NAME. */
6025
6026 static struct elf_link_hash_entry *
6027 find_arm_glue (struct bfd_link_info *link_info,
6028 const char *name,
6029 char **error_message)
6030 {
6031 char *tmp_name;
6032 struct elf_link_hash_entry *myh;
6033 struct elf32_arm_link_hash_table *hash_table;
6034
6035 /* We need a pointer to the elfarm specific hash table. */
6036 hash_table = elf32_arm_hash_table (link_info);
6037 if (hash_table == NULL)
6038 return NULL;
6039
6040 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6041 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6042
6043 BFD_ASSERT (tmp_name);
6044
6045 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6046
6047 myh = elf_link_hash_lookup
6048 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6049
6050 if (myh == NULL
6051 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6052 tmp_name, name) == -1)
6053 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6054
6055 free (tmp_name);
6056
6057 return myh;
6058 }
6059
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx r12
   __func_addr:
   .word func @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	/* ldr  r12, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx   r12  */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* address word, patched later  */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr  pc, [pc, #-4]  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* address word, patched later  */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr  r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add  r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx   r12  */

/* Thumb->ARM:                          Thumb->(non-interworking aware) ARM

     .thumb                             .thumb
     .align 2                           .align 2
 __func_from_thumb:                 __func_from_thumb:
     bx pc                              push  {r6, lr}
     nop                                ldr    r6, __func_addr
     .arm                               mov    lr, pc
     b func                             bx     r6
                                        .arm
				    ;; back_to_thumb
				        ldmia r13! {r6, lr}
				        bx    lr
                                    __func_addr:
				        .word        func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx   pc  (Thumb)  */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop  (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;	/* b    <target> (ARM)  */

/* Sizes in bytes of the erratum-workaround veneers emitted below.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: branch-exchange emulation for cores without BX.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst    rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq  pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx     rN  */
6128
6129 #ifndef ELFARM_NABI_C_INCLUDED
6130 static void
6131 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6132 {
6133 asection * s;
6134 bfd_byte * contents;
6135
6136 if (size == 0)
6137 {
6138 /* Do not include empty glue sections in the output. */
6139 if (abfd != NULL)
6140 {
6141 s = bfd_get_linker_section (abfd, name);
6142 if (s != NULL)
6143 s->flags |= SEC_EXCLUDE;
6144 }
6145 return;
6146 }
6147
6148 BFD_ASSERT (abfd != NULL);
6149
6150 s = bfd_get_linker_section (abfd, name);
6151 BFD_ASSERT (s != NULL);
6152
6153 contents = (bfd_byte *) bfd_alloc (abfd, size);
6154
6155 BFD_ASSERT (s->size == size);
6156 s->contents = contents;
6157 }
6158
6159 bfd_boolean
6160 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6161 {
6162 struct elf32_arm_link_hash_table * globals;
6163
6164 globals = elf32_arm_hash_table (info);
6165 BFD_ASSERT (globals != NULL);
6166
6167 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6168 globals->arm_glue_size,
6169 ARM2THUMB_GLUE_SECTION_NAME);
6170
6171 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6172 globals->thumb_glue_size,
6173 THUMB2ARM_GLUE_SECTION_NAME);
6174
6175 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6176 globals->vfp11_erratum_glue_size,
6177 VFP11_ERRATUM_VENEER_SECTION_NAME);
6178
6179 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6180 globals->stm32l4xx_erratum_glue_size,
6181 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6182
6183 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6184 globals->bx_glue_size,
6185 ARM_BX_GLUE_SECTION_NAME);
6186
6187 return TRUE;
6188 }
6189
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Mangled glue symbol name for the target function.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the veneer flavour: PIC glue for shared/PIE links or when PIC
     veneers were requested, the shorter v5 sequence when BLX is
     available, otherwise the generic static sequence.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for this veneer in the glue section.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6262
/* Allocate space for ARMv4 BX veneers.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  /* A veneer symbol for this register must not exist yet.  */
  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;
  /* NOTE(review): bit 1 set in the stored offset looks like a marker
     flag, mirroring the +1 trick in record_arm_to_thumb_glue — confirm
     against the code that consumes bx_glue_offset.  */
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
6319
6320
/* Add an entry to the code/data map for section SEC.  TYPE is the
   mapping-symbol class character ('a', 't' or 'd'); VMA is the offset
   the mapping takes effect at.  */

static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  /* Lazily create the map on first use.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Grow geometrically when the map is full.  bfd_realloc_or_free
     releases the old buffer on failure, leaving map NULL.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			     * sizeof (elf32_arm_section_map));
    }

  /* NOTE(review): on allocation failure map is NULL while mapcount has
     already been incremented, so the counters go inconsistent; the
     guard below only prevents a NULL-pointer write.  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
6353
6354
6355 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6356 veneers are handled for now. */
6357
6358 static bfd_vma
6359 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6360 elf32_vfp11_erratum_list *branch,
6361 bfd *branch_bfd,
6362 asection *branch_sec,
6363 unsigned int offset)
6364 {
6365 asection *s;
6366 struct elf32_arm_link_hash_table *hash_table;
6367 char *tmp_name;
6368 struct elf_link_hash_entry *myh;
6369 struct bfd_link_hash_entry *bh;
6370 bfd_vma val;
6371 struct _arm_elf_section_data *sec_data;
6372 elf32_vfp11_erratum_list *newerr;
6373
6374 hash_table = elf32_arm_hash_table (link_info);
6375 BFD_ASSERT (hash_table != NULL);
6376 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6377
6378 s = bfd_get_linker_section
6379 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6380
6381 sec_data = elf32_arm_section_data (s);
6382
6383 BFD_ASSERT (s != NULL);
6384
6385 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6386 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6387
6388 BFD_ASSERT (tmp_name);
6389
6390 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6391 hash_table->num_vfp11_fixes);
6392
6393 myh = elf_link_hash_lookup
6394 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6395
6396 BFD_ASSERT (myh == NULL);
6397
6398 bh = NULL;
6399 val = hash_table->vfp11_erratum_glue_size;
6400 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6401 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6402 NULL, TRUE, FALSE, &bh);
6403
6404 myh = (struct elf_link_hash_entry *) bh;
6405 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6406 myh->forced_local = 1;
6407
6408 /* Link veneer back to calling location. */
6409 sec_data->erratumcount += 1;
6410 newerr = (elf32_vfp11_erratum_list *)
6411 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6412
6413 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6414 newerr->vma = -1;
6415 newerr->u.v.branch = branch;
6416 newerr->u.v.id = hash_table->num_vfp11_fixes;
6417 branch->u.b.veneer = newerr;
6418
6419 newerr->next = sec_data->erratumlist;
6420 sec_data->erratumlist = newerr;
6421
6422 /* A symbol for the return from the veneer. */
6423 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6424 hash_table->num_vfp11_fixes);
6425
6426 myh = elf_link_hash_lookup
6427 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6428
6429 if (myh != NULL)
6430 abort ();
6431
6432 bh = NULL;
6433 val = offset + 4;
6434 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6435 branch_sec, val, NULL, TRUE, FALSE, &bh);
6436
6437 myh = (struct elf_link_hash_entry *) bh;
6438 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6439 myh->forced_local = 1;
6440
6441 free (tmp_name);
6442
6443 /* Generate a mapping symbol for the veneer section, and explicitly add an
6444 entry for that symbol to the code/data map for the section. */
6445 if (hash_table->vfp11_erratum_glue_size == 0)
6446 {
6447 bh = NULL;
6448 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6449 ever requires this erratum fix. */
6450 _bfd_generic_link_add_one_symbol (link_info,
6451 hash_table->bfd_of_glue_owner, "$a",
6452 BSF_LOCAL, s, 0, NULL,
6453 TRUE, FALSE, &bh);
6454
6455 myh = (struct elf_link_hash_entry *) bh;
6456 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6457 myh->forced_local = 1;
6458
6459 /* The elf32_arm_init_maps function only cares about symbols from input
6460 BFDs. We must make a note of this generated mapping symbol
6461 ourselves so that code byteswapping works properly in
6462 elf32_arm_write_section. */
6463 elf32_arm_section_map_add (s, 'a', 0);
6464 }
6465
6466 s->size += VFP11_ERRATUM_VENEER_SIZE;
6467 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6468 hash_table->num_vfp11_fixes++;
6469
6470 /* The offset of the veneer. */
6471 return val;
6472 }
6473
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.  BRANCH
   describes the erratum site in BRANCH_SEC of BRANCH_BFD at OFFSET;
   VENEER_SIZE is the size of the veneer to reserve.  Returns the
   veneer's offset in the glue section.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Create a unique symbol naming this veneer.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return symbol lives just past the erratum site.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Reserve room for this veneer in the glue section.  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
6592
/* Section flags for the linker-created glue sections: allocated,
   loaded, read-only code whose contents are supplied by BFD itself.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
6596
6597 /* Create a fake section for use by the ARM backend of the linker. */
6598
6599 static bfd_boolean
6600 arm_make_glue_section (bfd * abfd, const char * name)
6601 {
6602 asection * sec;
6603
6604 sec = bfd_get_linker_section (abfd, name);
6605 if (sec != NULL)
6606 /* Already made. */
6607 return TRUE;
6608
6609 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6610
6611 if (sec == NULL
6612 || !bfd_set_section_alignment (abfd, sec, 2))
6613 return FALSE;
6614
6615 /* Set the gc mark to prevent the section from being removed by garbage
6616 collection, despite the fact that no relocs refer to this section. */
6617 sec->gc_mark = 1;
6618
6619 return TRUE;
6620 }
6621
/* Set size of .plt entries.  This function is called from the
   linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  /* Flip the global flag so subsequent PLT sizing uses the long
     entry variant.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
6630
6631 /* Add the glue sections to ABFD. This function is called from the
6632 linker scripts in ld/emultempl/{armelf}.em. */
6633
6634 bfd_boolean
6635 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6636 struct bfd_link_info *info)
6637 {
6638 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6639 bfd_boolean dostm32l4xx = globals
6640 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6641 bfd_boolean addglue;
6642
6643 /* If we are only performing a partial
6644 link do not bother adding the glue. */
6645 if (bfd_link_relocatable (info))
6646 return TRUE;
6647
6648 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6649 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6650 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6651 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6652
6653 if (!dostm32l4xx)
6654 return addglue;
6655
6656 return addglue
6657 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6658 }
6659
6660 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6661 ensures they are not marked for deletion by
6662 strip_excluded_output_sections () when veneers are going to be created
6663 later. Not doing so would trigger assert on empty section size in
6664 lang_size_sections_1 (). */
6665
6666 void
6667 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6668 {
6669 enum elf32_arm_stub_type stub_type;
6670
6671 /* If we are only performing a partial
6672 link do not bother adding the glue. */
6673 if (bfd_link_relocatable (info))
6674 return;
6675
6676 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6677 {
6678 asection *out_sec;
6679 const char *out_sec_name;
6680
6681 if (!arm_dedicated_stub_output_section_required (stub_type))
6682 continue;
6683
6684 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6685 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6686 if (out_sec != NULL)
6687 out_sec->flags |= SEC_KEEP;
6688 }
6689 }
6690
6691 /* Select a BFD to be used to hold the sections used by the glue code.
6692 This function is called from the linker scripts in ld/emultempl/
6693 {armelf/pe}.em. */
6694
6695 bfd_boolean
6696 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6697 {
6698 struct elf32_arm_link_hash_table *globals;
6699
6700 /* If we are only performing a partial link
6701 do not bother getting a bfd to hold the glue. */
6702 if (bfd_link_relocatable (info))
6703 return TRUE;
6704
6705 /* Make sure we don't attach the glue sections to a dynamic object. */
6706 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6707
6708 globals = elf32_arm_hash_table (info);
6709 BFD_ASSERT (globals != NULL);
6710
6711 if (globals->bfd_of_glue_owner != NULL)
6712 return TRUE;
6713
6714 /* Save the bfd for later use. */
6715 globals->bfd_of_glue_owner = abfd;
6716
6717 return TRUE;
6718 }
6719
6720 static void
6721 check_use_blx (struct elf32_arm_link_hash_table *globals)
6722 {
6723 int cpu_arch;
6724
6725 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6726 Tag_CPU_arch);
6727
6728 if (globals->fix_arm1176)
6729 {
6730 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6731 globals->use_blx = 1;
6732 }
6733 else
6734 {
6735 if (cpu_arch > TAG_CPU_ARCH_V4T)
6736 globals->use_blx = 1;
6737 }
6738 }
6739
/* Scan the relocations of every section of ABFD, before section sizes
   are fixed, and record any ARM->Thumb interworking glue and ARMv4 BX
   veneers the link will need.  Returns FALSE on error.  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX only matters when full BX fixing is requested
	     (fix_v4bx >= 2).  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The register operand lives in the low nibble of the
		 BX instruction.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers unless BFD itself caches them.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6900 #endif
6901
6902
6903 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6904
6905 void
6906 bfd_elf32_arm_init_maps (bfd *abfd)
6907 {
6908 Elf_Internal_Sym *isymbuf;
6909 Elf_Internal_Shdr *hdr;
6910 unsigned int i, localsyms;
6911
6912 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6913 if (! is_arm_elf (abfd))
6914 return;
6915
6916 if ((abfd->flags & DYNAMIC) != 0)
6917 return;
6918
6919 hdr = & elf_symtab_hdr (abfd);
6920 localsyms = hdr->sh_info;
6921
6922 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6923 should contain the number of local symbols, which should come before any
6924 global symbols. Mapping symbols are always local. */
6925 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6926 NULL);
6927
6928 /* No internal symbols read? Skip this BFD. */
6929 if (isymbuf == NULL)
6930 return;
6931
6932 for (i = 0; i < localsyms; i++)
6933 {
6934 Elf_Internal_Sym *isym = &isymbuf[i];
6935 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6936 const char *name;
6937
6938 if (sec != NULL
6939 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6940 {
6941 name = bfd_elf_string_from_elf_section (abfd,
6942 hdr->sh_link, isym->st_name);
6943
6944 if (bfd_is_arm_special_symbol_name (name,
6945 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6946 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6947 }
6948 }
6949 }
6950
6951
6952 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6953 say what they wanted. */
6954
6955 void
6956 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6957 {
6958 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6959 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6960
6961 if (globals == NULL)
6962 return;
6963
6964 if (globals->fix_cortex_a8 == -1)
6965 {
6966 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6967 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6968 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6969 || out_attr[Tag_CPU_arch_profile].i == 0))
6970 globals->fix_cortex_a8 = 1;
6971 else
6972 globals->fix_cortex_a8 = 0;
6973 }
6974 }
6975
6976
6977 void
6978 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6979 {
6980 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6981 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6982
6983 if (globals == NULL)
6984 return;
6985 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6986 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6987 {
6988 switch (globals->vfp11_fix)
6989 {
6990 case BFD_ARM_VFP11_FIX_DEFAULT:
6991 case BFD_ARM_VFP11_FIX_NONE:
6992 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6993 break;
6994
6995 default:
6996 /* Give a warning, but do as the user requests anyway. */
6997 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6998 "workaround is not necessary for target architecture"), obfd);
6999 }
7000 }
7001 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
7002 /* For earlier architectures, we might need the workaround, but do not
7003 enable it by default. If users is running with broken hardware, they
7004 must enable the erratum fix explicitly. */
7005 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7006 }
7007
7008 void
7009 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
7010 {
7011 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7012 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7013
7014 if (globals == NULL)
7015 return;
7016
7017 /* We assume only Cortex-M4 may require the fix. */
7018 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7019 || out_attr[Tag_CPU_arch_profile].i != 'M')
7020 {
7021 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7022 /* Give a warning, but do as the user requests anyway. */
7023 (*_bfd_error_handler)
7024 (_("%B: warning: selected STM32L4XX erratum "
7025 "workaround is not necessary for target architecture"), obfd);
7026 }
7027 }
7028
/* Classification of a VFP11 instruction by the pipeline it executes in,
   as decided by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline (fmac, fmul, fadd, ...).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline (fdiv, fsqrt).  */
  VFP11_BAD	/* Not a recognized VFP11 instruction.  */
};
7036
7037 /* Return a VFP register number. This is encoded as RX:X for single-precision
7038 registers, or X:RX for double-precision registers, where RX is the group of
7039 four bits in the instruction encoding and X is the single extension bit.
7040 RX and X fields are specified using their lowest (starting) bit. The return
7041 value is:
7042
7043 0...31: single-precision registers s0...s31
7044 32...63: double-precision registers d0...d31.
7045
7046 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7047 encounter VFP3 instructions, so we allow the full range for DP registers. */
7048
7049 static unsigned int
7050 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7051 unsigned int x)
7052 {
7053 if (is_double)
7054 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7055 else
7056 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7057 }
7058
7059 /* Set bits in *WMASK according to a register number REG as encoded by
7060 bfd_arm_vfp11_regno(). Ignore d16-d31. */
7061
static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  /* REG is a register number as returned by bfd_arm_vfp11_regno:
     0-31 are single-precision registers (one mask bit each), 32-47 are
     d0-d15 (two mask bits covering the component singles).  Registers
     d16-d31 (48 and up) are deliberately ignored.  */
  if (reg >= 48)
    return;

  if (reg >= 32)
    *wmask |= 3u << ((reg - 32) * 2);
  else
    *wmask |= 1u << reg;
}
7070
7071 /* Return TRUE if WMASK overwrites anything in REGS. */
7072
7073 static bfd_boolean
7074 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7075 {
7076 int i;
7077
7078 for (i = 0; i < numregs; i++)
7079 {
7080 unsigned int reg = regs[i];
7081
7082 if (reg < 32 && (wmask & (1 << reg)) != 0)
7083 return TRUE;
7084
7085 reg -= 32;
7086
7087 if (reg >= 16)
7088 continue;
7089
7090 if ((wmask & (3 << (reg * 2))) != 0)
7091 return TRUE;
7092 }
7093
7094 return FALSE;
7095 }
7096
7097 /* In this function, we're interested in two things: finding input registers
7098 for VFP data-processing instructions, and finding the set of registers which
7099 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7100 hold the written set, so FLDM etc. are easy to deal with (we're only
7101 interested in 32 SP registers or 16 dp registers, due to the VFP version
7102 implemented by the chip in question). DP registers are marked by setting
7103 both SP registers in the write mask). */
7104
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 equal to 0xb select double-precision operands.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Collect the p, q, r, s opcode bits into one 4-bit selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Accumulating forms read Fd as well as Fn and Fm.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Plain two-operand forms: inputs are Fn and Fm only.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear: the transfer overwrites the VFP registers
	 (FM, and FM+1 for the single-precision pair).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits: W at bit 21, P/U at bits 24:23.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Register-list load: mark every destination register.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7288
7289
7290 static int elf32_arm_compare_mapping (const void * a, const void * b);
7291
7292
7293 /* Look for potentially-troublesome code sequences which might trigger the
7294 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7295 (available from ARM) for details of the erratum. A short version is
7296 described in ld.texinfo. */
7297
7298 bfd_boolean
7299 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7300 {
7301 asection *sec;
7302 bfd_byte *contents = NULL;
7303 int state = 0;
7304 int regs[3], numregs = 0;
7305 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7306 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7307
7308 if (globals == NULL)
7309 return FALSE;
7310
7311 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7312 The states transition as follows:
7313
7314 0 -> 1 (vector) or 0 -> 2 (scalar)
7315 A VFP FMAC-pipeline instruction has been seen. Fill
7316 regs[0]..regs[numregs-1] with its input operands. Remember this
7317 instruction in 'first_fmac'.
7318
7319 1 -> 2
7320 Any instruction, except for a VFP instruction which overwrites
7321 regs[*].
7322
7323 1 -> 3 [ -> 0 ] or
7324 2 -> 3 [ -> 0 ]
7325 A VFP instruction has been seen which overwrites any of regs[*].
7326 We must make a veneer! Reset state to 0 before examining next
7327 instruction.
7328
7329 2 -> 0
7330 If we fail to match anything in state 2, reset to state 0 and reset
7331 the instruction pointer to the instruction after 'first_fmac'.
7332
7333 If the VFP11 vector mode is in use, there must be at least two unrelated
7334 instructions between anti-dependent VFP11 instructions to properly avoid
7335 triggering the erratum, hence the use of the extra state 1. */
7336
7337 /* If we are only performing a partial link do not bother
7338 to construct any glue. */
7339 if (bfd_link_relocatable (link_info))
7340 return TRUE;
7341
7342 /* Skip if this bfd does not correspond to an ELF image. */
7343 if (! is_arm_elf (abfd))
7344 return TRUE;
7345
7346 /* We should have chosen a fix type by the time we get here. */
7347 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7348
7349 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7350 return TRUE;
7351
7352 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7353 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7354 return TRUE;
7355
7356 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7357 {
7358 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7359 struct _arm_elf_section_data *sec_data;
7360
7361 /* If we don't have executable progbits, we're not interested in this
7362 section. Also skip if section is to be excluded. */
7363 if (elf_section_type (sec) != SHT_PROGBITS
7364 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7365 || (sec->flags & SEC_EXCLUDE) != 0
7366 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7367 || sec->output_section == bfd_abs_section_ptr
7368 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7369 continue;
7370
7371 sec_data = elf32_arm_section_data (sec);
7372
7373 if (sec_data->mapcount == 0)
7374 continue;
7375
7376 if (elf_section_data (sec)->this_hdr.contents != NULL)
7377 contents = elf_section_data (sec)->this_hdr.contents;
7378 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7379 goto error_return;
7380
7381 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7382 elf32_arm_compare_mapping);
7383
7384 for (span = 0; span < sec_data->mapcount; span++)
7385 {
7386 unsigned int span_start = sec_data->map[span].vma;
7387 unsigned int span_end = (span == sec_data->mapcount - 1)
7388 ? sec->size : sec_data->map[span + 1].vma;
7389 char span_type = sec_data->map[span].type;
7390
7391 /* FIXME: Only ARM mode is supported at present. We may need to
7392 support Thumb-2 mode also at some point. */
7393 if (span_type != 'a')
7394 continue;
7395
7396 for (i = span_start; i < span_end;)
7397 {
7398 unsigned int next_i = i + 4;
7399 unsigned int insn = bfd_big_endian (abfd)
7400 ? (contents[i] << 24)
7401 | (contents[i + 1] << 16)
7402 | (contents[i + 2] << 8)
7403 | contents[i + 3]
7404 : (contents[i + 3] << 24)
7405 | (contents[i + 2] << 16)
7406 | (contents[i + 1] << 8)
7407 | contents[i];
7408 unsigned int writemask = 0;
7409 enum bfd_arm_vfp11_pipe vpipe;
7410
7411 switch (state)
7412 {
7413 case 0:
7414 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7415 &numregs);
7416 /* I'm assuming the VFP11 erratum can trigger with denorm
7417 operands on either the FMAC or the DS pipeline. This might
7418 lead to slightly overenthusiastic veneer insertion. */
7419 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7420 {
7421 state = use_vector ? 1 : 2;
7422 first_fmac = i;
7423 veneer_of_insn = insn;
7424 }
7425 break;
7426
7427 case 1:
7428 {
7429 int other_regs[3], other_numregs;
7430 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7431 other_regs,
7432 &other_numregs);
7433 if (vpipe != VFP11_BAD
7434 && bfd_arm_vfp11_antidependency (writemask, regs,
7435 numregs))
7436 state = 3;
7437 else
7438 state = 2;
7439 }
7440 break;
7441
7442 case 2:
7443 {
7444 int other_regs[3], other_numregs;
7445 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7446 other_regs,
7447 &other_numregs);
7448 if (vpipe != VFP11_BAD
7449 && bfd_arm_vfp11_antidependency (writemask, regs,
7450 numregs))
7451 state = 3;
7452 else
7453 {
7454 state = 0;
7455 next_i = first_fmac + 4;
7456 }
7457 }
7458 break;
7459
7460 case 3:
7461 abort (); /* Should be unreachable. */
7462 }
7463
7464 if (state == 3)
7465 {
7466 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7467 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7468
7469 elf32_arm_section_data (sec)->erratumcount += 1;
7470
7471 newerr->u.b.vfp_insn = veneer_of_insn;
7472
7473 switch (span_type)
7474 {
7475 case 'a':
7476 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7477 break;
7478
7479 default:
7480 abort ();
7481 }
7482
7483 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7484 first_fmac);
7485
7486 newerr->vma = -1;
7487
7488 newerr->next = sec_data->erratumlist;
7489 sec_data->erratumlist = newerr;
7490
7491 state = 0;
7492 }
7493
7494 i = next_i;
7495 }
7496 }
7497
7498 if (contents != NULL
7499 && elf_section_data (sec)->this_hdr.contents != contents)
7500 free (contents);
7501 contents = NULL;
7502 }
7503
7504 return TRUE;
7505
7506 error_return:
7507 if (contents != NULL
7508 && elf_section_data (sec)->this_hdr.contents != contents)
7509 free (contents);
7510
7511 return FALSE;
7512 }
7513
7514 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7515 after sections have been laid out, using specially-named symbols. */
7516
7517 void
7518 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7519 struct bfd_link_info *link_info)
7520 {
7521 asection *sec;
7522 struct elf32_arm_link_hash_table *globals;
7523 char *tmp_name;
7524
7525 if (bfd_link_relocatable (link_info))
7526 return;
7527
7528 /* Skip if this bfd does not correspond to an ELF image. */
7529 if (! is_arm_elf (abfd))
7530 return;
7531
7532 globals = elf32_arm_hash_table (link_info);
7533 if (globals == NULL)
7534 return;
7535
7536 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7537 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7538
7539 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7540 {
7541 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7542 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7543
7544 for (; errnode != NULL; errnode = errnode->next)
7545 {
7546 struct elf_link_hash_entry *myh;
7547 bfd_vma vma;
7548
7549 switch (errnode->type)
7550 {
7551 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7552 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7553 /* Find veneer symbol. */
7554 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7555 errnode->u.b.veneer->u.v.id);
7556
7557 myh = elf_link_hash_lookup
7558 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7559
7560 if (myh == NULL)
7561 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7562 "`%s'"), abfd, tmp_name);
7563
7564 vma = myh->root.u.def.section->output_section->vma
7565 + myh->root.u.def.section->output_offset
7566 + myh->root.u.def.value;
7567
7568 errnode->u.b.veneer->vma = vma;
7569 break;
7570
7571 case VFP11_ERRATUM_ARM_VENEER:
7572 case VFP11_ERRATUM_THUMB_VENEER:
7573 /* Find return location. */
7574 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7575 errnode->u.v.id);
7576
7577 myh = elf_link_hash_lookup
7578 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7579
7580 if (myh == NULL)
7581 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7582 "`%s'"), abfd, tmp_name);
7583
7584 vma = myh->root.u.def.section->output_section->vma
7585 + myh->root.u.def.section->output_offset
7586 + myh->root.u.def.value;
7587
7588 errnode->u.v.branch->vma = vma;
7589 break;
7590
7591 default:
7592 abort ();
7593 }
7594 }
7595 }
7596
7597 free (tmp_name);
7598 }
7599
7600 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7601 return locations after sections have been laid out, using
7602 specially-named symbols. */
7603
7604 void
7605 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7606 struct bfd_link_info *link_info)
7607 {
7608 asection *sec;
7609 struct elf32_arm_link_hash_table *globals;
7610 char *tmp_name;
7611
7612 if (bfd_link_relocatable (link_info))
7613 return;
7614
7615 /* Skip if this bfd does not correspond to an ELF image. */
7616 if (! is_arm_elf (abfd))
7617 return;
7618
7619 globals = elf32_arm_hash_table (link_info);
7620 if (globals == NULL)
7621 return;
7622
7623 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7624 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7625
7626 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7627 {
7628 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7629 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7630
7631 for (; errnode != NULL; errnode = errnode->next)
7632 {
7633 struct elf_link_hash_entry *myh;
7634 bfd_vma vma;
7635
7636 switch (errnode->type)
7637 {
7638 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7639 /* Find veneer symbol. */
7640 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7641 errnode->u.b.veneer->u.v.id);
7642
7643 myh = elf_link_hash_lookup
7644 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7645
7646 if (myh == NULL)
7647 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7648 "`%s'"), abfd, tmp_name);
7649
7650 vma = myh->root.u.def.section->output_section->vma
7651 + myh->root.u.def.section->output_offset
7652 + myh->root.u.def.value;
7653
7654 errnode->u.b.veneer->vma = vma;
7655 break;
7656
7657 case STM32L4XX_ERRATUM_VENEER:
7658 /* Find return location. */
7659 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7660 errnode->u.v.id);
7661
7662 myh = elf_link_hash_lookup
7663 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7664
7665 if (myh == NULL)
7666 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7667 "`%s'"), abfd, tmp_name);
7668
7669 vma = myh->root.u.def.section->output_section->vma
7670 + myh->root.u.def.section->output_offset
7671 + myh->root.u.def.value;
7672
7673 errnode->u.v.branch->vma = vma;
7674 break;
7675
7676 default:
7677 abort ();
7678 }
7679 }
7680 }
7681
7682 free (tmp_name);
7683 }
7684
7685 static inline bfd_boolean
7686 is_thumb2_ldmia (const insn32 insn)
7687 {
7688 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7689 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7690 return (insn & 0xffd02000) == 0xe8900000;
7691 }
7692
7693 static inline bfd_boolean
7694 is_thumb2_ldmdb (const insn32 insn)
7695 {
7696 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7697 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7698 return (insn & 0xffd02000) == 0xe9100000;
7699 }
7700
7701 static inline bfd_boolean
7702 is_thumb2_vldm (const insn32 insn)
7703 {
7704 /* A6.5 Extension register load or store instruction
7705 A7.7.229
7706 We look for SP 32-bit and DP 64-bit registers.
7707 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7708 <list> is consecutive 64-bit registers
7709 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7710 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7711 <list> is consecutive 32-bit registers
7712 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7713 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7714 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7715 return
7716 (((insn & 0xfe100f00) == 0xec100b00) ||
7717 ((insn & 0xfe100f00) == 0xec100a00))
7718 && /* (IA without !). */
7719 (((((insn << 7) >> 28) & 0xd) == 0x4)
7720 /* (IA with !), includes VPOP (when reg number is SP). */
7721 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7722 /* (DB with !). */
7723 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7724 }
7725
7726 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7727 VLDM opcode and:
7728 - computes the number and the mode of memory accesses
7729 - decides if the replacement should be done:
7730 . replaces only if > 8-word accesses
7731 . or (testing purposes only) replaces all accesses. */
7732
7733 static bfd_boolean
7734 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7735 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7736 {
7737 int nb_words = 0;
7738
7739 /* The field encoding the register list is the same for both LDMIA
7740 and LDMDB encodings. */
7741 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7742 nb_words = popcount (insn & 0x0000ffff);
7743 else if (is_thumb2_vldm (insn))
7744 nb_words = (insn & 0xff);
7745
7746 /* DEFAULT mode accounts for the real bug condition situation,
7747 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7748 return
7749 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7750 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7751 }
7752
7753 /* Look for potentially-troublesome code sequences which might trigger
7754 the STM STM32L4XX erratum. */
7755
bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  /* Walk every code section, using the mapping symbols to find Thumb
     spans, and record an erratum veneer for each qualifying LDM/VLDM.  */
  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort the mapping symbols by address so spans can be walked in
	 order.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm) &&
		      stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  (*_bfd_error_handler)
			    /* Note - overlong line used here to allow for translation.  */
			    (_("\
%B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
			       "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
			     abfd, sec, (long)i);
			}
		      else
			{
			  /* Queue a veneer for this instruction; its VMA is
			     filled in later by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
		    ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      /* Free section contents unless they are cached in the header.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
7946
7947 /* Set target relocation values needed during linking. */
7948
7949 void
7950 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7951 struct bfd_link_info *link_info,
7952 int target1_is_rel,
7953 char * target2_type,
7954 int fix_v4bx,
7955 int use_blx,
7956 bfd_arm_vfp11_fix vfp11_fix,
7957 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7958 int no_enum_warn, int no_wchar_warn,
7959 int pic_veneer, int fix_cortex_a8,
7960 int fix_arm1176)
7961 {
7962 struct elf32_arm_link_hash_table *globals;
7963
7964 globals = elf32_arm_hash_table (link_info);
7965 if (globals == NULL)
7966 return;
7967
7968 globals->target1_is_rel = target1_is_rel;
7969 if (strcmp (target2_type, "rel") == 0)
7970 globals->target2_reloc = R_ARM_REL32;
7971 else if (strcmp (target2_type, "abs") == 0)
7972 globals->target2_reloc = R_ARM_ABS32;
7973 else if (strcmp (target2_type, "got-rel") == 0)
7974 globals->target2_reloc = R_ARM_GOT_PREL;
7975 else
7976 {
7977 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7978 target2_type);
7979 }
7980 globals->fix_v4bx = fix_v4bx;
7981 globals->use_blx |= use_blx;
7982 globals->vfp11_fix = vfp11_fix;
7983 globals->stm32l4xx_fix = stm32l4xx_fix;
7984 globals->pic_veneer = pic_veneer;
7985 globals->fix_cortex_a8 = fix_cortex_a8;
7986 globals->fix_arm1176 = fix_arm1176;
7987
7988 BFD_ASSERT (is_arm_elf (output_bfd));
7989 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7990 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7991 }
7992
7993 /* Replace the target offset of a Thumb bl or b.w instruction. */
7994
static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Thumb branch targets are halfword-aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: keep the opcode bits (above bit 10), insert the sign
     bit at bit 10 and offset bits 21-12 below it.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: the mask 0x2fff preserves bit 12 (which, per the
     Thumb-2 BL/B.W encoding, distinguishes the two forms); insert offset
     bits 23 and 22 at bits 13 and 11 (each inverted and XORed with the
     sign) and offset bits 11-1 in the low eleven bits.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
8017
8018 /* Thumb code calling an ARM function. */
8019
static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Locate the glue symbol recorded for NAME; fail if none was created.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set in the glue symbol's value marks a stub whose contents have
     not yet been written; clear the bit and emit the instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       " first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: bx pc, nop, then an ARM branch to the real target.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
8118
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL if
   no glue symbol exists for NAME.  VAL is the address of the Thumb
   target; S is the glue section the stub lives in.  The stub contents
   are written only once, the first time this is called for NAME.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol recorded earlier for this target.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* An odd stub offset marks a stub whose contents have not been
     written yet; clear the marker bit and emit the instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Warn (but continue) if the target object was not compiled
	 for interworking.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* With BLX available a single load of the target address
	     suffices.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* Pre-v5 absolute stub: load address into ip then bx ip.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8213
/* Arm code calling a Thumb function.  Populate (if needed) the
   ARM->Thumb glue stub for NAME and rewrite the original ARM branch
   at OFFSET within INPUT_SECTION (instruction bytes at HIT_DATA) so
   that it targets the stub.  Returns FALSE on failure.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  /* Keep only the condition/opcode byte of the original branch.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Insert the new word offset into the 24-bit branch field.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
8270
/* Populate Arm stub for an exported Thumb function.  Called via
   elf_link_hash_traverse; INF is the struct bfd_link_info.  Always
   returns TRUE so the traversal visits every symbol.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Run-time address of the Thumb function the stub must reach.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
8314
/* Populate ARMv4 BX veneers.  Returns the absolute address of the
   veneer for register REG, emitting its contents on first use.
   bx_glue_offset[REG] encodes both the veneer's offset and two flag
   bits: bit 1 means a veneer was requested during scanning, bit 0
   means its contents have already been written.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* A veneer must have been requested for this register.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  /* Strip the flag bits to recover the section offset.  */
  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Emit the veneer contents the first time through only.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      /* tst REG, #1; moveq pc, REG; bx REG.  */
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
8350
8351 /* Generate Arm stubs for exported Thumb symbols. */
8352 static void
8353 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8354 struct bfd_link_info *link_info)
8355 {
8356 struct elf32_arm_link_hash_table * globals;
8357
8358 if (link_info == NULL)
8359 /* Ignore this if we are not called by the ELF backend linker. */
8360 return;
8361
8362 globals = elf32_arm_hash_table (link_info);
8363 if (globals == NULL)
8364 return;
8365
8366 /* If blx is available then exported Thumb symbols are OK and there is
8367 nothing to do. */
8368 if (globals->use_blx)
8369 return;
8370
8371 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8372 link_info);
8373 }
8374
8375 /* Reserve space for COUNT dynamic relocations in relocation selection
8376 SRELOC. */
8377
8378 static void
8379 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8380 bfd_size_type count)
8381 {
8382 struct elf32_arm_link_hash_table *htab;
8383
8384 htab = elf32_arm_hash_table (info);
8385 BFD_ASSERT (htab->root.dynamic_sections_created);
8386 if (sreloc == NULL)
8387 abort ();
8388 sreloc->size += RELOC_SIZE (htab) * count;
8389 }
8390
8391 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8392 dynamic, the relocations should go in SRELOC, otherwise they should
8393 go in the special .rel.iplt section. */
8394
8395 static void
8396 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8397 bfd_size_type count)
8398 {
8399 struct elf32_arm_link_hash_table *htab;
8400
8401 htab = elf32_arm_hash_table (info);
8402 if (!htab->root.dynamic_sections_created)
8403 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8404 else
8405 {
8406 BFD_ASSERT (sreloc != NULL);
8407 sreloc->size += RELOC_SIZE (htab) * count;
8408 }
8409 }
8410
8411 /* Add relocation REL to the end of relocation section SRELOC. */
8412
8413 static void
8414 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8415 asection *sreloc, Elf_Internal_Rela *rel)
8416 {
8417 bfd_byte *loc;
8418 struct elf32_arm_link_hash_table *htab;
8419
8420 htab = elf32_arm_hash_table (info);
8421 if (!htab->root.dynamic_sections_created
8422 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8423 sreloc = htab->root.irelplt;
8424 if (sreloc == NULL)
8425 abort ();
8426 loc = sreloc->contents;
8427 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8428 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8429 abort ();
8430 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8431 }
8432
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Records the entry's offset in ROOT_PLT->offset and its
   GOT slot in ARM_PLT->got_offset, and reserves the matching
   dynamic relocation.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The recorded offset points past the Thumb stub, at the entry
     proper.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      /* NOTE(review): the non-iplt offset is biased by 8 bytes per TLS
	 descriptor — presumably TLSDESC GOT slots are accounted
	 separately; confirm against the TLS descriptor handling.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      sgotplt->size += 4;
    }
}
8494
8495 static bfd_vma
8496 arm_movw_immediate (bfd_vma value)
8497 {
8498 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8499 }
8500
8501 static bfd_vma
8502 arm_movt_immediate (bfd_vma value)
8503 {
8504 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8505 }
8506
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  */
      /* NOTE(review): bit 0 of got_offset appears to be used as a flag
	 elsewhere and is masked off here — confirm against the code
	 that sets arm_plt->got_offset.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-library PLT: slots 2 and 5 are data words
	     (GOT offset and .rel.plt offset), the rest instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT: like the shared variant, plus a
	     PC-relative branch in slot 4 and two .rela.plt.unloaded
	     relocations.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit in a 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt of the GOT displacement, then the fixed tail
	     branch.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  /* The displacement is scattered over the Thumb-2 movw/movt
	     immediate fields (imm8/imm3/i/imm4).  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* A leading Thumb stub sits immediately before the entry
	     (space reserved in elf32_arm_allocate_plt_entry).  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short entry can only reach a GOT within 28 bits.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* The long form splits the displacement over four
		 instructions so any 32-bit offset is reachable.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* The GOT slot initially points at the start of the PLT, so
	     the first call reaches the resolver stub.  */
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8838
8839 /* Some relocations map to different relocations depending on the
8840 target. Return the real relocation. */
8841
8842 static int
8843 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8844 int r_type)
8845 {
8846 switch (r_type)
8847 {
8848 case R_ARM_TARGET1:
8849 if (globals->target1_is_rel)
8850 return R_ARM_REL32;
8851 else
8852 return R_ARM_ABS32;
8853
8854 case R_ARM_TARGET2:
8855 return globals->target2_reloc;
8856
8857 default:
8858 return r_type;
8859 }
8860 }
8861
8862 /* Return the base VMA address which should be subtracted from real addresses
8863 when resolving @dtpoff relocation.
8864 This is PT_TLS segment p_vaddr. */
8865
8866 static bfd_vma
8867 dtpoff_base (struct bfd_link_info *info)
8868 {
8869 /* If tls_sec is NULL, we should have signalled an error already. */
8870 if (elf_hash_table (info)->tls_sec == NULL)
8871 return 0;
8872 return elf_hash_table (info)->tls_sec->vma;
8873 }
8874
8875 /* Return the relocation value for @tpoff relocation
8876 if STT_TLS virtual address is ADDRESS. */
8877
8878 static bfd_vma
8879 tpoff (struct bfd_link_info *info, bfd_vma address)
8880 {
8881 struct elf_link_hash_table *htab = elf_hash_table (info);
8882 bfd_vma base;
8883
8884 /* If tls_sec is NULL, we should have signalled an error already. */
8885 if (htab->tls_sec == NULL)
8886 return 0;
8887 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8888 return address - htab->tls_sec->vma + base;
8889 }
8890
8891 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8892 VALUE is the relocation value. */
8893
8894 static bfd_reloc_status_type
8895 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8896 {
8897 if (value > 0xfff)
8898 return bfd_reloc_overflow;
8899
8900 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8901 bfd_put_32 (abfd, value, data);
8902 return bfd_reloc_ok;
8903 }
8904
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   IS_LOCAL is non-zero for a symbol resolved at static link time, in
   which case the descriptor sequence collapses to the IE/LE form.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Adjust the word holding the descriptor address; the bias
	 depends on whether the consuming sequence is Thumb or ARM
	 (flagged by bit 0 of the stored value).  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite each instruction of the descriptor
	 sequence in place according to which one it is.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same rewriting as above, for the ARM encoding.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* Write the replacement as two halfwords.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
9049
9050 /* For a given value of n, calculate the value of G_n as required to
9051 deal with group relocations. We return it in the form of an
9052 encoded constant-and-rotation, together with the final residual. If n is
9053 specified as less than zero, then final_residual is filled with the
9054 input value and no further action is performed. */
9055
9056 static bfd_vma
9057 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9058 {
9059 int current_n;
9060 bfd_vma g_n;
9061 bfd_vma encoded_g_n = 0;
9062 bfd_vma residual = value; /* Also known as Y_n. */
9063
9064 for (current_n = 0; current_n <= n; current_n++)
9065 {
9066 int shift;
9067
9068 /* Calculate which part of the value to mask. */
9069 if (residual == 0)
9070 shift = 0;
9071 else
9072 {
9073 int msb;
9074
9075 /* Determine the most significant bit in the residual and
9076 align the resulting value to a 2-bit boundary. */
9077 for (msb = 30; msb >= 0; msb -= 2)
9078 if (residual & (3 << msb))
9079 break;
9080
9081 /* The desired shift is now (msb - 6), or zero, whichever
9082 is the greater. */
9083 shift = msb - 6;
9084 if (shift < 0)
9085 shift = 0;
9086 }
9087
9088 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9089 g_n = residual & (0xff << shift);
9090 encoded_g_n = (g_n >> shift)
9091 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9092
9093 /* Calculate the residual for the next time around. */
9094 residual &= ~g_n;
9095 }
9096
9097 *final_residual = residual;
9098
9099 return encoded_g_n;
9100 }
9101
9102 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9103 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9104
9105 static int
9106 identify_add_or_sub (bfd_vma insn)
9107 {
9108 int opcode = insn & 0x1e00000;
9109
9110 if (opcode == 1 << 23) /* ADD */
9111 return 1;
9112
9113 if (opcode == 1 << 22) /* SUB */
9114 return -1;
9115
9116 return 0;
9117 }
9118
9119 /* Perform a relocation as part of a final link. */
9120
9121 static bfd_reloc_status_type
9122 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9123 bfd * input_bfd,
9124 bfd * output_bfd,
9125 asection * input_section,
9126 bfd_byte * contents,
9127 Elf_Internal_Rela * rel,
9128 bfd_vma value,
9129 struct bfd_link_info * info,
9130 asection * sym_sec,
9131 const char * sym_name,
9132 unsigned char st_type,
9133 enum arm_st_branch_type branch_type,
9134 struct elf_link_hash_entry * h,
9135 bfd_boolean * unresolved_reloc_p,
9136 char ** error_message)
9137 {
9138 unsigned long r_type = howto->type;
9139 unsigned long r_symndx;
9140 bfd_byte * hit_data = contents + rel->r_offset;
9141 bfd_vma * local_got_offsets;
9142 bfd_vma * local_tlsdesc_gotents;
9143 asection * sgot;
9144 asection * splt;
9145 asection * sreloc = NULL;
9146 asection * srelgot;
9147 bfd_vma addend;
9148 bfd_signed_vma signed_addend;
9149 unsigned char dynreloc_st_type;
9150 bfd_vma dynreloc_value;
9151 struct elf32_arm_link_hash_table * globals;
9152 struct elf32_arm_link_hash_entry *eh;
9153 union gotplt_union *root_plt;
9154 struct arm_plt_info *arm_plt;
9155 bfd_vma plt_offset;
9156 bfd_vma gotplt_offset;
9157 bfd_boolean has_iplt_entry;
9158
9159 globals = elf32_arm_hash_table (info);
9160 if (globals == NULL)
9161 return bfd_reloc_notsupported;
9162
9163 BFD_ASSERT (is_arm_elf (input_bfd));
9164
9165 /* Some relocation types map to different relocations depending on the
9166 target. We pick the right one here. */
9167 r_type = arm_real_reloc_type (globals, r_type);
9168
9169 /* It is possible to have linker relaxations on some TLS access
9170 models. Update our information here. */
9171 r_type = elf32_arm_tls_transition (info, r_type, h);
9172
9173 if (r_type != howto->type)
9174 howto = elf32_arm_howto_from_type (r_type);
9175
9176 eh = (struct elf32_arm_link_hash_entry *) h;
9177 sgot = globals->root.sgot;
9178 local_got_offsets = elf_local_got_offsets (input_bfd);
9179 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9180
9181 if (globals->root.dynamic_sections_created)
9182 srelgot = globals->root.srelgot;
9183 else
9184 srelgot = NULL;
9185
9186 r_symndx = ELF32_R_SYM (rel->r_info);
9187
9188 if (globals->use_rel)
9189 {
9190 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9191
9192 if (addend & ((howto->src_mask + 1) >> 1))
9193 {
9194 signed_addend = -1;
9195 signed_addend &= ~ howto->src_mask;
9196 signed_addend |= addend;
9197 }
9198 else
9199 signed_addend = addend;
9200 }
9201 else
9202 addend = signed_addend = rel->r_addend;
9203
9204 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9205 are resolving a function call relocation. */
9206 if (using_thumb_only (globals)
9207 && (r_type == R_ARM_THM_CALL
9208 || r_type == R_ARM_THM_JUMP24)
9209 && branch_type == ST_BRANCH_TO_ARM)
9210 branch_type = ST_BRANCH_TO_THUMB;
9211
9212 /* Record the symbol information that should be used in dynamic
9213 relocations. */
9214 dynreloc_st_type = st_type;
9215 dynreloc_value = value;
9216 if (branch_type == ST_BRANCH_TO_THUMB)
9217 dynreloc_value |= 1;
9218
9219 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9220 VALUE appropriately for relocations that we resolve at link time. */
9221 has_iplt_entry = FALSE;
9222 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9223 && root_plt->offset != (bfd_vma) -1)
9224 {
9225 plt_offset = root_plt->offset;
9226 gotplt_offset = arm_plt->got_offset;
9227
9228 if (h == NULL || eh->is_iplt)
9229 {
9230 has_iplt_entry = TRUE;
9231 splt = globals->root.iplt;
9232
9233 /* Populate .iplt entries here, because not all of them will
9234 be seen by finish_dynamic_symbol. The lower bit is set if
9235 we have already populated the entry. */
9236 if (plt_offset & 1)
9237 plt_offset--;
9238 else
9239 {
9240 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9241 -1, dynreloc_value))
9242 root_plt->offset |= 1;
9243 else
9244 return bfd_reloc_notsupported;
9245 }
9246
9247 /* Static relocations always resolve to the .iplt entry. */
9248 st_type = STT_FUNC;
9249 value = (splt->output_section->vma
9250 + splt->output_offset
9251 + plt_offset);
9252 branch_type = ST_BRANCH_TO_ARM;
9253
9254 /* If there are non-call relocations that resolve to the .iplt
9255 entry, then all dynamic ones must too. */
9256 if (arm_plt->noncall_refcount != 0)
9257 {
9258 dynreloc_st_type = st_type;
9259 dynreloc_value = value;
9260 }
9261 }
9262 else
9263 /* We populate the .plt entry in finish_dynamic_symbol. */
9264 splt = globals->root.splt;
9265 }
9266 else
9267 {
9268 splt = NULL;
9269 plt_offset = (bfd_vma) -1;
9270 gotplt_offset = (bfd_vma) -1;
9271 }
9272
9273 switch (r_type)
9274 {
9275 case R_ARM_NONE:
9276 /* We don't need to find a value for this symbol. It's just a
9277 marker. */
9278 *unresolved_reloc_p = FALSE;
9279 return bfd_reloc_ok;
9280
9281 case R_ARM_ABS12:
9282 if (!globals->vxworks_p)
9283 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9284
9285 case R_ARM_PC24:
9286 case R_ARM_ABS32:
9287 case R_ARM_ABS32_NOI:
9288 case R_ARM_REL32:
9289 case R_ARM_REL32_NOI:
9290 case R_ARM_CALL:
9291 case R_ARM_JUMP24:
9292 case R_ARM_XPC25:
9293 case R_ARM_PREL31:
9294 case R_ARM_PLT32:
9295 /* Handle relocations which should use the PLT entry. ABS32/REL32
9296 will use the symbol's value, which may point to a PLT entry, but we
9297 don't need to handle that here. If we created a PLT entry, all
9298 branches in this object should go to it, except if the PLT is too
9299 far away, in which case a long branch stub should be inserted. */
9300 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9301 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9302 && r_type != R_ARM_CALL
9303 && r_type != R_ARM_JUMP24
9304 && r_type != R_ARM_PLT32)
9305 && plt_offset != (bfd_vma) -1)
9306 {
9307 /* If we've created a .plt section, and assigned a PLT entry
9308 to this function, it must either be a STT_GNU_IFUNC reference
9309 or not be known to bind locally. In other cases, we should
9310 have cleared the PLT entry by now. */
9311 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9312
9313 value = (splt->output_section->vma
9314 + splt->output_offset
9315 + plt_offset);
9316 *unresolved_reloc_p = FALSE;
9317 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9318 contents, rel->r_offset, value,
9319 rel->r_addend);
9320 }
9321
9322 /* When generating a shared object or relocatable executable, these
9323 relocations are copied into the output file to be resolved at
9324 run time. */
9325 if ((bfd_link_pic (info)
9326 || globals->root.is_relocatable_executable)
9327 && (input_section->flags & SEC_ALLOC)
9328 && !(globals->vxworks_p
9329 && strcmp (input_section->output_section->name,
9330 ".tls_vars") == 0)
9331 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9332 || !SYMBOL_CALLS_LOCAL (info, h))
9333 && !(input_bfd == globals->stub_bfd
9334 && strstr (input_section->name, STUB_SUFFIX))
9335 && (h == NULL
9336 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9337 || h->root.type != bfd_link_hash_undefweak)
9338 && r_type != R_ARM_PC24
9339 && r_type != R_ARM_CALL
9340 && r_type != R_ARM_JUMP24
9341 && r_type != R_ARM_PREL31
9342 && r_type != R_ARM_PLT32)
9343 {
9344 Elf_Internal_Rela outrel;
9345 bfd_boolean skip, relocate;
9346
9347 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9348 && !h->def_regular)
9349 {
9350 char *v = _("shared object");
9351
9352 if (bfd_link_executable (info))
9353 v = _("PIE executable");
9354
9355 (*_bfd_error_handler)
9356 (_("%B: relocation %s against external or undefined symbol `%s'"
9357 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9358 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9359 return bfd_reloc_notsupported;
9360 }
9361
9362 *unresolved_reloc_p = FALSE;
9363
9364 if (sreloc == NULL && globals->root.dynamic_sections_created)
9365 {
9366 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9367 ! globals->use_rel);
9368
9369 if (sreloc == NULL)
9370 return bfd_reloc_notsupported;
9371 }
9372
9373 skip = FALSE;
9374 relocate = FALSE;
9375
9376 outrel.r_addend = addend;
9377 outrel.r_offset =
9378 _bfd_elf_section_offset (output_bfd, info, input_section,
9379 rel->r_offset);
9380 if (outrel.r_offset == (bfd_vma) -1)
9381 skip = TRUE;
9382 else if (outrel.r_offset == (bfd_vma) -2)
9383 skip = TRUE, relocate = TRUE;
9384 outrel.r_offset += (input_section->output_section->vma
9385 + input_section->output_offset);
9386
9387 if (skip)
9388 memset (&outrel, 0, sizeof outrel);
9389 else if (h != NULL
9390 && h->dynindx != -1
9391 && (!bfd_link_pic (info)
9392 || !SYMBOLIC_BIND (info, h)
9393 || !h->def_regular))
9394 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9395 else
9396 {
9397 int symbol;
9398
9399 /* This symbol is local, or marked to become local. */
9400 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9401 if (globals->symbian_p)
9402 {
9403 asection *osec;
9404
9405 /* On Symbian OS, the data segment and text segement
9406 can be relocated independently. Therefore, we
9407 must indicate the segment to which this
9408 relocation is relative. The BPABI allows us to
9409 use any symbol in the right segment; we just use
9410 the section symbol as it is convenient. (We
9411 cannot use the symbol given by "h" directly as it
9412 will not appear in the dynamic symbol table.)
9413
9414 Note that the dynamic linker ignores the section
9415 symbol value, so we don't subtract osec->vma
9416 from the emitted reloc addend. */
9417 if (sym_sec)
9418 osec = sym_sec->output_section;
9419 else
9420 osec = input_section->output_section;
9421 symbol = elf_section_data (osec)->dynindx;
9422 if (symbol == 0)
9423 {
9424 struct elf_link_hash_table *htab = elf_hash_table (info);
9425
9426 if ((osec->flags & SEC_READONLY) == 0
9427 && htab->data_index_section != NULL)
9428 osec = htab->data_index_section;
9429 else
9430 osec = htab->text_index_section;
9431 symbol = elf_section_data (osec)->dynindx;
9432 }
9433 BFD_ASSERT (symbol != 0);
9434 }
9435 else
9436 /* On SVR4-ish systems, the dynamic loader cannot
9437 relocate the text and data segments independently,
9438 so the symbol does not matter. */
9439 symbol = 0;
9440 if (dynreloc_st_type == STT_GNU_IFUNC)
9441 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9442 to the .iplt entry. Instead, every non-call reference
9443 must use an R_ARM_IRELATIVE relocation to obtain the
9444 correct run-time address. */
9445 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9446 else
9447 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9448 if (globals->use_rel)
9449 relocate = TRUE;
9450 else
9451 outrel.r_addend += dynreloc_value;
9452 }
9453
9454 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9455
9456 /* If this reloc is against an external symbol, we do not want to
9457 fiddle with the addend. Otherwise, we need to include the symbol
9458 value so that it becomes an addend for the dynamic reloc. */
9459 if (! relocate)
9460 return bfd_reloc_ok;
9461
9462 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9463 contents, rel->r_offset,
9464 dynreloc_value, (bfd_vma) 0);
9465 }
9466 else switch (r_type)
9467 {
9468 case R_ARM_ABS12:
9469 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9470
9471 case R_ARM_XPC25: /* Arm BLX instruction. */
9472 case R_ARM_CALL:
9473 case R_ARM_JUMP24:
9474 case R_ARM_PC24: /* Arm B/BL instruction. */
9475 case R_ARM_PLT32:
9476 {
9477 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9478
9479 if (r_type == R_ARM_XPC25)
9480 {
9481 /* Check for Arm calling Arm function. */
9482 /* FIXME: Should we translate the instruction into a BL
9483 instruction instead ? */
9484 if (branch_type != ST_BRANCH_TO_THUMB)
9485 (*_bfd_error_handler)
9486 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9487 input_bfd,
9488 h ? h->root.root.string : "(local)");
9489 }
9490 else if (r_type == R_ARM_PC24)
9491 {
9492 /* Check for Arm calling Thumb function. */
9493 if (branch_type == ST_BRANCH_TO_THUMB)
9494 {
9495 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9496 output_bfd, input_section,
9497 hit_data, sym_sec, rel->r_offset,
9498 signed_addend, value,
9499 error_message))
9500 return bfd_reloc_ok;
9501 else
9502 return bfd_reloc_dangerous;
9503 }
9504 }
9505
9506 /* Check if a stub has to be inserted because the
9507 destination is too far or we are changing mode. */
9508 if ( r_type == R_ARM_CALL
9509 || r_type == R_ARM_JUMP24
9510 || r_type == R_ARM_PLT32)
9511 {
9512 enum elf32_arm_stub_type stub_type = arm_stub_none;
9513 struct elf32_arm_link_hash_entry *hash;
9514
9515 hash = (struct elf32_arm_link_hash_entry *) h;
9516 stub_type = arm_type_of_stub (info, input_section, rel,
9517 st_type, &branch_type,
9518 hash, value, sym_sec,
9519 input_bfd, sym_name);
9520
9521 if (stub_type != arm_stub_none)
9522 {
9523 /* The target is out of reach, so redirect the
9524 branch to the local stub for this function. */
9525 stub_entry = elf32_arm_get_stub_entry (input_section,
9526 sym_sec, h,
9527 rel, globals,
9528 stub_type);
9529 {
9530 if (stub_entry != NULL)
9531 value = (stub_entry->stub_offset
9532 + stub_entry->stub_sec->output_offset
9533 + stub_entry->stub_sec->output_section->vma);
9534
9535 if (plt_offset != (bfd_vma) -1)
9536 *unresolved_reloc_p = FALSE;
9537 }
9538 }
9539 else
9540 {
9541 /* If the call goes through a PLT entry, make sure to
9542 check distance to the right destination address. */
9543 if (plt_offset != (bfd_vma) -1)
9544 {
9545 value = (splt->output_section->vma
9546 + splt->output_offset
9547 + plt_offset);
9548 *unresolved_reloc_p = FALSE;
9549 /* The PLT entry is in ARM mode, regardless of the
9550 target function. */
9551 branch_type = ST_BRANCH_TO_ARM;
9552 }
9553 }
9554 }
9555
9556 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9557 where:
9558 S is the address of the symbol in the relocation.
9559 P is address of the instruction being relocated.
9560 A is the addend (extracted from the instruction) in bytes.
9561
9562 S is held in 'value'.
9563 P is the base address of the section containing the
9564 instruction plus the offset of the reloc into that
9565 section, ie:
9566 (input_section->output_section->vma +
9567 input_section->output_offset +
9568 rel->r_offset).
9569 A is the addend, converted into bytes, ie:
9570 (signed_addend * 4)
9571
9572 Note: None of these operations have knowledge of the pipeline
9573 size of the processor, thus it is up to the assembler to
9574 encode this information into the addend. */
9575 value -= (input_section->output_section->vma
9576 + input_section->output_offset);
9577 value -= rel->r_offset;
9578 if (globals->use_rel)
9579 value += (signed_addend << howto->size);
9580 else
9581 /* RELA addends do not have to be adjusted by howto->size. */
9582 value += signed_addend;
9583
9584 signed_addend = value;
9585 signed_addend >>= howto->rightshift;
9586
9587 /* A branch to an undefined weak symbol is turned into a jump to
9588 the next instruction unless a PLT entry will be created.
9589 Do the same for local undefined symbols (but not for STN_UNDEF).
9590 The jump to the next instruction is optimized as a NOP depending
9591 on the architecture. */
9592 if (h ? (h->root.type == bfd_link_hash_undefweak
9593 && plt_offset == (bfd_vma) -1)
9594 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9595 {
9596 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9597
9598 if (arch_has_arm_nop (globals))
9599 value |= 0x0320f000;
9600 else
9601 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9602 }
9603 else
9604 {
9605 /* Perform a signed range check. */
9606 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9607 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9608 return bfd_reloc_overflow;
9609
9610 addend = (value & 2);
9611
9612 value = (signed_addend & howto->dst_mask)
9613 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9614
9615 if (r_type == R_ARM_CALL)
9616 {
9617 /* Set the H bit in the BLX instruction. */
9618 if (branch_type == ST_BRANCH_TO_THUMB)
9619 {
9620 if (addend)
9621 value |= (1 << 24);
9622 else
9623 value &= ~(bfd_vma)(1 << 24);
9624 }
9625
9626 /* Select the correct instruction (BL or BLX). */
9627 /* Only if we are not handling a BL to a stub. In this
9628 case, mode switching is performed by the stub. */
9629 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9630 value |= (1 << 28);
9631 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9632 {
9633 value &= ~(bfd_vma)(1 << 28);
9634 value |= (1 << 24);
9635 }
9636 }
9637 }
9638 }
9639 break;
9640
9641 case R_ARM_ABS32:
9642 value += addend;
9643 if (branch_type == ST_BRANCH_TO_THUMB)
9644 value |= 1;
9645 break;
9646
9647 case R_ARM_ABS32_NOI:
9648 value += addend;
9649 break;
9650
9651 case R_ARM_REL32:
9652 value += addend;
9653 if (branch_type == ST_BRANCH_TO_THUMB)
9654 value |= 1;
9655 value -= (input_section->output_section->vma
9656 + input_section->output_offset + rel->r_offset);
9657 break;
9658
9659 case R_ARM_REL32_NOI:
9660 value += addend;
9661 value -= (input_section->output_section->vma
9662 + input_section->output_offset + rel->r_offset);
9663 break;
9664
9665 case R_ARM_PREL31:
9666 value -= (input_section->output_section->vma
9667 + input_section->output_offset + rel->r_offset);
9668 value += signed_addend;
9669 if (! h || h->root.type != bfd_link_hash_undefweak)
9670 {
9671 /* Check for overflow. */
9672 if ((value ^ (value >> 1)) & (1 << 30))
9673 return bfd_reloc_overflow;
9674 }
9675 value &= 0x7fffffff;
9676 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9677 if (branch_type == ST_BRANCH_TO_THUMB)
9678 value |= 1;
9679 break;
9680 }
9681
9682 bfd_put_32 (input_bfd, value, hit_data);
9683 return bfd_reloc_ok;
9684
9685 case R_ARM_ABS8:
9686 /* PR 16202: Refectch the addend using the correct size. */
9687 if (globals->use_rel)
9688 addend = bfd_get_8 (input_bfd, hit_data);
9689 value += addend;
9690
9691 /* There is no way to tell whether the user intended to use a signed or
9692 unsigned addend. When checking for overflow we accept either,
9693 as specified by the AAELF. */
9694 if ((long) value > 0xff || (long) value < -0x80)
9695 return bfd_reloc_overflow;
9696
9697 bfd_put_8 (input_bfd, value, hit_data);
9698 return bfd_reloc_ok;
9699
9700 case R_ARM_ABS16:
9701 /* PR 16202: Refectch the addend using the correct size. */
9702 if (globals->use_rel)
9703 addend = bfd_get_16 (input_bfd, hit_data);
9704 value += addend;
9705
9706 /* See comment for R_ARM_ABS8. */
9707 if ((long) value > 0xffff || (long) value < -0x8000)
9708 return bfd_reloc_overflow;
9709
9710 bfd_put_16 (input_bfd, value, hit_data);
9711 return bfd_reloc_ok;
9712
9713 case R_ARM_THM_ABS5:
9714 /* Support ldr and str instructions for the thumb. */
9715 if (globals->use_rel)
9716 {
9717 /* Need to refetch addend. */
9718 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9719 /* ??? Need to determine shift amount from operand size. */
9720 addend >>= howto->rightshift;
9721 }
9722 value += addend;
9723
9724 /* ??? Isn't value unsigned? */
9725 if ((long) value > 0x1f || (long) value < -0x10)
9726 return bfd_reloc_overflow;
9727
9728 /* ??? Value needs to be properly shifted into place first. */
9729 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9730 bfd_put_16 (input_bfd, value, hit_data);
9731 return bfd_reloc_ok;
9732
9733 case R_ARM_THM_ALU_PREL_11_0:
9734 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9735 {
9736 bfd_vma insn;
9737 bfd_signed_vma relocation;
9738
9739 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9740 | bfd_get_16 (input_bfd, hit_data + 2);
9741
9742 if (globals->use_rel)
9743 {
9744 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9745 | ((insn & (1 << 26)) >> 15);
9746 if (insn & 0xf00000)
9747 signed_addend = -signed_addend;
9748 }
9749
9750 relocation = value + signed_addend;
9751 relocation -= Pa (input_section->output_section->vma
9752 + input_section->output_offset
9753 + rel->r_offset);
9754
9755 value = relocation;
9756
9757 if (value >= 0x1000)
9758 return bfd_reloc_overflow;
9759
9760 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9761 | ((value & 0x700) << 4)
9762 | ((value & 0x800) << 15);
9763 if (relocation < 0)
9764 insn |= 0xa00000;
9765
9766 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9767 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9768
9769 return bfd_reloc_ok;
9770 }
9771
9772 case R_ARM_THM_PC8:
9773 /* PR 10073: This reloc is not generated by the GNU toolchain,
9774 but it is supported for compatibility with third party libraries
9775 generated by other compilers, specifically the ARM/IAR. */
9776 {
9777 bfd_vma insn;
9778 bfd_signed_vma relocation;
9779
9780 insn = bfd_get_16 (input_bfd, hit_data);
9781
9782 if (globals->use_rel)
9783 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9784
9785 relocation = value + addend;
9786 relocation -= Pa (input_section->output_section->vma
9787 + input_section->output_offset
9788 + rel->r_offset);
9789
9790 value = relocation;
9791
9792 /* We do not check for overflow of this reloc. Although strictly
9793 speaking this is incorrect, it appears to be necessary in order
9794 to work with IAR generated relocs. Since GCC and GAS do not
9795 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9796 a problem for them. */
9797 value &= 0x3fc;
9798
9799 insn = (insn & 0xff00) | (value >> 2);
9800
9801 bfd_put_16 (input_bfd, insn, hit_data);
9802
9803 return bfd_reloc_ok;
9804 }
9805
9806 case R_ARM_THM_PC12:
9807 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9808 {
9809 bfd_vma insn;
9810 bfd_signed_vma relocation;
9811
9812 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9813 | bfd_get_16 (input_bfd, hit_data + 2);
9814
9815 if (globals->use_rel)
9816 {
9817 signed_addend = insn & 0xfff;
9818 if (!(insn & (1 << 23)))
9819 signed_addend = -signed_addend;
9820 }
9821
9822 relocation = value + signed_addend;
9823 relocation -= Pa (input_section->output_section->vma
9824 + input_section->output_offset
9825 + rel->r_offset);
9826
9827 value = relocation;
9828
9829 if (value >= 0x1000)
9830 return bfd_reloc_overflow;
9831
9832 insn = (insn & 0xff7ff000) | value;
9833 if (relocation >= 0)
9834 insn |= (1 << 23);
9835
9836 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9837 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9838
9839 return bfd_reloc_ok;
9840 }
9841
9842 case R_ARM_THM_XPC22:
9843 case R_ARM_THM_CALL:
9844 case R_ARM_THM_JUMP24:
9845 /* Thumb BL (branch long instruction). */
9846 {
9847 bfd_vma relocation;
9848 bfd_vma reloc_sign;
9849 bfd_boolean overflow = FALSE;
9850 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9851 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9852 bfd_signed_vma reloc_signed_max;
9853 bfd_signed_vma reloc_signed_min;
9854 bfd_vma check;
9855 bfd_signed_vma signed_check;
9856 int bitsize;
9857 const int thumb2 = using_thumb2 (globals);
9858 const int thumb2_bl = using_thumb2_bl (globals);
9859
9860 /* A branch to an undefined weak symbol is turned into a jump to
9861 the next instruction unless a PLT entry will be created.
9862 The jump to the next instruction is optimized as a NOP.W for
9863 Thumb-2 enabled architectures. */
9864 if (h && h->root.type == bfd_link_hash_undefweak
9865 && plt_offset == (bfd_vma) -1)
9866 {
9867 if (thumb2)
9868 {
9869 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9870 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9871 }
9872 else
9873 {
9874 bfd_put_16 (input_bfd, 0xe000, hit_data);
9875 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9876 }
9877 return bfd_reloc_ok;
9878 }
9879
9880 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9881 with Thumb-1) involving the J1 and J2 bits. */
9882 if (globals->use_rel)
9883 {
9884 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9885 bfd_vma upper = upper_insn & 0x3ff;
9886 bfd_vma lower = lower_insn & 0x7ff;
9887 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9888 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9889 bfd_vma i1 = j1 ^ s ? 0 : 1;
9890 bfd_vma i2 = j2 ^ s ? 0 : 1;
9891
9892 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9893 /* Sign extend. */
9894 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9895
9896 signed_addend = addend;
9897 }
9898
9899 if (r_type == R_ARM_THM_XPC22)
9900 {
9901 /* Check for Thumb to Thumb call. */
9902 /* FIXME: Should we translate the instruction into a BL
9903 instruction instead ? */
9904 if (branch_type == ST_BRANCH_TO_THUMB)
9905 (*_bfd_error_handler)
9906 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9907 input_bfd,
9908 h ? h->root.root.string : "(local)");
9909 }
9910 else
9911 {
9912 /* If it is not a call to Thumb, assume call to Arm.
9913 If it is a call relative to a section name, then it is not a
9914 function call at all, but rather a long jump. Calls through
9915 the PLT do not require stubs. */
9916 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9917 {
9918 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9919 {
9920 /* Convert BL to BLX. */
9921 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9922 }
9923 else if (( r_type != R_ARM_THM_CALL)
9924 && (r_type != R_ARM_THM_JUMP24))
9925 {
9926 if (elf32_thumb_to_arm_stub
9927 (info, sym_name, input_bfd, output_bfd, input_section,
9928 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9929 error_message))
9930 return bfd_reloc_ok;
9931 else
9932 return bfd_reloc_dangerous;
9933 }
9934 }
9935 else if (branch_type == ST_BRANCH_TO_THUMB
9936 && globals->use_blx
9937 && r_type == R_ARM_THM_CALL)
9938 {
9939 /* Make sure this is a BL. */
9940 lower_insn |= 0x1800;
9941 }
9942 }
9943
9944 enum elf32_arm_stub_type stub_type = arm_stub_none;
9945 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9946 {
9947 /* Check if a stub has to be inserted because the destination
9948 is too far. */
9949 struct elf32_arm_stub_hash_entry *stub_entry;
9950 struct elf32_arm_link_hash_entry *hash;
9951
9952 hash = (struct elf32_arm_link_hash_entry *) h;
9953
9954 stub_type = arm_type_of_stub (info, input_section, rel,
9955 st_type, &branch_type,
9956 hash, value, sym_sec,
9957 input_bfd, sym_name);
9958
9959 if (stub_type != arm_stub_none)
9960 {
9961 /* The target is out of reach or we are changing modes, so
9962 redirect the branch to the local stub for this
9963 function. */
9964 stub_entry = elf32_arm_get_stub_entry (input_section,
9965 sym_sec, h,
9966 rel, globals,
9967 stub_type);
9968 if (stub_entry != NULL)
9969 {
9970 value = (stub_entry->stub_offset
9971 + stub_entry->stub_sec->output_offset
9972 + stub_entry->stub_sec->output_section->vma);
9973
9974 if (plt_offset != (bfd_vma) -1)
9975 *unresolved_reloc_p = FALSE;
9976 }
9977
9978 /* If this call becomes a call to Arm, force BLX. */
9979 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9980 {
9981 if ((stub_entry
9982 && !arm_stub_is_thumb (stub_entry->stub_type))
9983 || branch_type != ST_BRANCH_TO_THUMB)
9984 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9985 }
9986 }
9987 }
9988
9989 /* Handle calls via the PLT. */
9990 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9991 {
9992 value = (splt->output_section->vma
9993 + splt->output_offset
9994 + plt_offset);
9995
9996 if (globals->use_blx
9997 && r_type == R_ARM_THM_CALL
9998 && ! using_thumb_only (globals))
9999 {
10000 /* If the Thumb BLX instruction is available, convert
10001 the BL to a BLX instruction to call the ARM-mode
10002 PLT entry. */
10003 lower_insn = (lower_insn & ~0x1000) | 0x0800;
10004 branch_type = ST_BRANCH_TO_ARM;
10005 }
10006 else
10007 {
10008 if (! using_thumb_only (globals))
10009 /* Target the Thumb stub before the ARM PLT entry. */
10010 value -= PLT_THUMB_STUB_SIZE;
10011 branch_type = ST_BRANCH_TO_THUMB;
10012 }
10013 *unresolved_reloc_p = FALSE;
10014 }
10015
10016 relocation = value + signed_addend;
10017
10018 relocation -= (input_section->output_section->vma
10019 + input_section->output_offset
10020 + rel->r_offset);
10021
10022 check = relocation >> howto->rightshift;
10023
10024 /* If this is a signed value, the rightshift just dropped
10025 leading 1 bits (assuming twos complement). */
10026 if ((bfd_signed_vma) relocation >= 0)
10027 signed_check = check;
10028 else
10029 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10030
10031 /* Calculate the permissable maximum and minimum values for
10032 this relocation according to whether we're relocating for
10033 Thumb-2 or not. */
10034 bitsize = howto->bitsize;
10035 if (!thumb2_bl)
10036 bitsize -= 2;
10037 reloc_signed_max = (1 << (bitsize - 1)) - 1;
10038 reloc_signed_min = ~reloc_signed_max;
10039
10040 /* Assumes two's complement. */
10041 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10042 overflow = TRUE;
10043
10044 if ((lower_insn & 0x5000) == 0x4000)
10045 /* For a BLX instruction, make sure that the relocation is rounded up
10046 to a word boundary. This follows the semantics of the instruction
10047 which specifies that bit 1 of the target address will come from bit
10048 1 of the base address. */
10049 relocation = (relocation + 2) & ~ 3;
10050
10051 /* Put RELOCATION back into the insn. Assumes two's complement.
10052 We use the Thumb-2 encoding, which is safe even if dealing with
10053 a Thumb-1 instruction by virtue of our overflow check above. */
10054 reloc_sign = (signed_check < 0) ? 1 : 0;
10055 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10056 | ((relocation >> 12) & 0x3ff)
10057 | (reloc_sign << 10);
10058 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10059 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10060 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10061 | ((relocation >> 1) & 0x7ff);
10062
10063 /* Put the relocated value back in the object file: */
10064 bfd_put_16 (input_bfd, upper_insn, hit_data);
10065 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10066
10067 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10068 }
10069 break;
10070
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction (B<cond>.W).  The 20-bit
	 signed offset is scattered across the two halfwords as
	 S:J2:J1:imm6:imm11, so both extraction and insertion have to
	 reassemble the pieces.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* Branch range is +/- 1 MiB; offsets are halfword aligned, so the
	   even byte displacement limits below.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	/* A long-branch or interworking stub may be required; if one is,
	   redirect the branch at the stub instead of the final target.  */
	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S  = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
10162
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction.  The field width and shift come
	 from the howto, so one body serves all three encodings.  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Manually sign-extend the field: if the top bit of the
	       source field is set, fill the high bits with ones.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	/* CZB (CBZ/CBNZ) splits its offset into i:imm5 fields; the other
	   forms are a simple contiguous field selected by dst_mask.  */
	if (r_type == R_ARM_THM_JUMP6)
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
10217
10218 case R_ARM_ALU_PCREL7_0:
10219 case R_ARM_ALU_PCREL15_8:
10220 case R_ARM_ALU_PCREL23_15:
10221 {
10222 bfd_vma insn;
10223 bfd_vma relocation;
10224
10225 insn = bfd_get_32 (input_bfd, hit_data);
10226 if (globals->use_rel)
10227 {
10228 /* Extract the addend. */
10229 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10230 signed_addend = addend;
10231 }
10232 relocation = value + signed_addend;
10233
10234 relocation -= (input_section->output_section->vma
10235 + input_section->output_offset
10236 + rel->r_offset);
10237 insn = (insn & ~0xfff)
10238 | ((howto->bitpos << 7) & 0xf00)
10239 | ((relocation >> howto->bitpos) & 0xff);
10240 bfd_put_32 (input_bfd, value, hit_data);
10241 }
10242 return bfd_reloc_ok;
10243
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are marker relocations used for vtable garbage collection;
	 they patch no bits in the output, so there is nothing to do.  */
      return bfd_reloc_ok;
10247
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10271
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  The PC-relative
	 adjustment itself is applied by _bfd_final_link_relocate via
	 the howto.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10284
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  The GOT slot is filled in (and a dynamic
	 relocation emitted if needed) the first time it is seen; the
	 low bit of the recorded offset marks "already processed".  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;

	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info) &&
			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			    || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (outrel.r_info == 0 || globals->use_rel))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}
	      /* Mark this GOT slot as done.  */
	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: the GOT offset comes from the per-bfd table.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL &&
		      local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      if (globals->use_rel)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
		{
		  Elf_Internal_Rela outrel;

		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT32 is GOT-relative; the other form wants the absolute
	 address of the GOT slot.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10417
    case R_ARM_TLS_LDO32:
      /* Local-dynamic TLS: resolve to the symbol's offset from the
	 start of the TLS block.  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
10424
    case R_ARM_TLS_LDM32:
      /* Local-dynamic TLS: resolve to the (shared, single) GOT slot
	 holding the module ID.  The low bit of tls_ldm_got.offset marks
	 that the slot has already been initialized.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_pic (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Static link: the executable is always module 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	/* PC-relative offset of the GOT slot from the relocated place.  */
	value = sgot->output_section->vma + sgot->output_offset + off
	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10471
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic, initial-exec and TLS-descriptor accesses.
	 This arm (1) lazily initializes the GOT/.got.plt entries for the
	 symbol, emitting dynamic relocations where needed, and (2)
	 computes the value to patch into the instruction, which for the
	 _CALL forms means synthesizing a branch to a TLS trampoline.  */
      {
	bfd_vma off, offplt;
	int indx = 0;
	char tls_type;

	BFD_ASSERT (sgot != NULL);

	if (h != NULL)
	  {
	    bfd_boolean dyn;
	    dyn = globals->root.dynamic_sections_created;
	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
						 bfd_link_pic (info),
						 h)
		&& (!bfd_link_pic (info)
		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
	      {
		*unresolved_reloc_p = FALSE;
		indx = h->dynindx;
	      }
	    off = h->got.offset;
	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
	  }
	else
	  {
	    BFD_ASSERT (local_got_offsets != NULL);
	    off = local_got_offsets[r_symndx];
	    offplt = local_tlsdesc_gotents[r_symndx];
	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
	  }

	/* Linker relaxations happen from one of the
	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
	if (ELF32_R_TYPE(rel->r_info) != r_type)
	  tls_type = GOT_TLS_IE;

	BFD_ASSERT (tls_type != GOT_UNKNOWN);

	/* Low bit of OFF records whether the GOT entries were already
	   filled in by an earlier relocation against this symbol.  */
	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    bfd_boolean need_relocs = FALSE;
	    Elf_Internal_Rela outrel;
	    int cur_off = off;

	    /* The GOT entries have not been initialized yet.  Do it
	       now, and emit any relocations.  If both an IE GOT and a
	       GD GOT are necessary, we emit the GD first.  */

	    if ((bfd_link_pic (info) || indx != 0)
		&& (h == NULL
		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		    || h->root.type != bfd_link_hash_undefweak))
	      {
		need_relocs = TRUE;
		BFD_ASSERT (srelgot != NULL);
	      }

	    if (tls_type & GOT_TLS_GDESC)
	      {
		bfd_byte *loc;

		/* We should have relaxed, unless this is an undefined
		   weak symbol.  */
		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
			    || bfd_link_pic (info));
		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
			    <= globals->root.sgotplt->size);

		outrel.r_addend = 0;
		outrel.r_offset = (globals->root.sgotplt->output_section->vma
				   + globals->root.sgotplt->output_offset
				   + offplt
				   + globals->sgotplt_jump_table_size);

		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
		sreloc = globals->root.srelplt;
		loc = sreloc->contents;
		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
		BFD_ASSERT (loc + RELOC_SIZE (globals)
			    <= sreloc->contents + sreloc->size);

		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

		/* For globals, the first word in the relocation gets
		   the relocation index and the top bit set, or zero,
		   if we're binding now.  For locals, it gets the
		   symbol's offset in the tls section.  */
		bfd_put_32 (output_bfd,
			    !h ? value - elf_hash_table (info)->tls_sec->vma
			    : info->flags & DF_BIND_NOW ? 0
			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size);

		/* Second word in the relocation is always zero.  */
		bfd_put_32 (output_bfd, 0,
			    globals->root.sgotplt->contents + offplt
			    + globals->sgotplt_jump_table_size + 4);
	      }
	    if (tls_type & GOT_TLS_GD)
	      {
		if (need_relocs)
		  {
		    /* GD needs a pair of entries: module ID then offset
		       within the module's TLS block.  */
		    outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

		    if (indx == 0)
		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
				  sgot->contents + cur_off + 4);
		    else
		      {
			outrel.r_addend = 0;
			outrel.r_info = ELF32_R_INFO (indx,
						      R_ARM_TLS_DTPOFF32);
			outrel.r_offset += 4;

			if (globals->use_rel)
			  bfd_put_32 (output_bfd, outrel.r_addend,
				      sgot->contents + cur_off + 4);

			elf32_arm_add_dynreloc (output_bfd, info,
						srelgot, &outrel);
		      }
		  }
		else
		  {
		    /* If we are not emitting relocations for a
		       general dynamic reference, then we must be in a
		       static link or an executable link with the
		       symbol binding locally.  Mark it as belonging
		       to module 1, the executable.  */
		    bfd_put_32 (output_bfd, 1,
				sgot->contents + cur_off);
		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
				sgot->contents + cur_off + 4);
		  }

		cur_off += 8;
	      }

	    if (tls_type & GOT_TLS_IE)
	      {
		if (need_relocs)
		  {
		    if (indx == 0)
		      outrel.r_addend = value - dtpoff_base (info);
		    else
		      outrel.r_addend = 0;
		    outrel.r_offset = (sgot->output_section->vma
				       + sgot->output_offset
				       + cur_off);
		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

		    if (globals->use_rel)
		      bfd_put_32 (output_bfd, outrel.r_addend,
				  sgot->contents + cur_off);

		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		  }
		else
		  /* Offset is known at link time: store it directly.  */
		  bfd_put_32 (output_bfd, tpoff (info, value),
			      sgot->contents + cur_off);
		cur_off += 4;
	      }

	    if (h != NULL)
	      h->got.offset |= 1;
	    else
	      local_got_offsets[r_symndx] |= 1;
	  }

	/* An IE-relaxed reference skips over the GD pair.  */
	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
	  off += 8;
	else if (tls_type & GOT_TLS_GDESC)
	  off = offplt;

	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
	  {
	    bfd_signed_vma offset;
	    /* TLS stubs are arm mode.  The original symbol is a
	       data object, so branch_type is bogus.  */
	    branch_type = ST_BRANCH_TO_ARM;
	    enum elf32_arm_stub_type stub_type
	      = arm_type_of_stub (info, input_section, rel,
				  st_type, &branch_type,
				  (struct elf32_arm_link_hash_entry *)h,
				  globals->tls_trampoline, globals->root.splt,
				  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		struct elf32_arm_stub_hash_entry *stub_entry
		  = elf32_arm_get_stub_entry
		  (input_section, globals->root.splt, 0, rel,
		   globals, stub_type);
		offset = (stub_entry->stub_offset
			  + stub_entry->stub_sec->output_offset
			  + stub_entry->stub_sec->output_section->vma);
	      }
	    else
	      offset = (globals->root.splt->output_section->vma
			+ globals->root.splt->output_offset
			+ globals->tls_trampoline);

	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
	      {
		/* ARM-mode BL/BLX: 24-bit word offset, PC bias of 8.  */
		unsigned long inst;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8);

		inst = offset >> 2;
		inst &= 0x00ffffff;
		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
	      }
	    else
	      {
		/* Thumb blx encodes the offset in a complicated
		   fashion.  */
		unsigned upper_insn, lower_insn;
		unsigned neg;

		offset -= (input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 4);

		if (stub_type != arm_stub_none
		    && arm_stub_is_thumb (stub_type))
		  {
		    lower_insn = 0xd000;
		  }
		else
		  {
		    lower_insn = 0xc000;
		    /* Round up the offset to a word boundary.  */
		    offset = (offset + 2) & ~2;
		  }

		neg = offset < 0;
		upper_insn = (0xf000
			      | ((offset >> 12) & 0x3ff)
			      | (neg << 10));
		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
			      | ((offset >> 1) & 0x7ff);
		bfd_put_16 (input_bfd, upper_insn, hit_data);
		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
		return bfd_reloc_ok;
	      }
	  }
	/* These relocations need special care, as besides the fact
	   they point somewhere in .gotplt, the addend must be
	   adjusted accordingly depending on the type of instruction
	   we refer to.  */
	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
	  {
	    unsigned long data, insn;
	    unsigned thumb;

	    /* The word at the relocated place holds the distance back to
	       the consuming instruction; its low bit flags Thumb mode.  */
	    data = bfd_get_32 (input_bfd, hit_data);
	    thumb = data & 1;
	    data &= ~1u;

	    if (thumb)
	      {
		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
		  insn = (insn << 16)
		    | bfd_get_16 (input_bfd,
				  contents + rel->r_offset - data + 2);
		if ((insn & 0xf800c000) == 0xf000c000)
		  /* bl/blx */
		  value = -6;
		else if ((insn & 0xffffff00) == 0x4400)
		  /* add */
		  value = -5;
		else
		  {
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }
	    else
	      {
		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

		switch (insn >> 24)
		  {
		  case 0xeb:  /* bl */
		  case 0xfa:  /* blx */
		    value = -4;
		    break;

		  case 0xe0:	/* add */
		    value = -8;
		    break;

		  default:
		    (*_bfd_error_handler)
		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
		       input_bfd, input_section,
		       (unsigned long)rel->r_offset, insn);
		    return bfd_reloc_notsupported;
		  }
	      }

	    value += ((globals->root.sgotplt->output_section->vma
		       + globals->root.sgotplt->output_offset + off)
		      - (input_section->output_section->vma
			 + input_section->output_offset
			 + rel->r_offset)
		      + globals->sgotplt_jump_table_size);
	  }
	else
	  value = ((globals->root.sgot->output_section->vma
		    + globals->root.sgot->output_offset + off)
		   - (input_section->output_section->vma
		      + input_section->output_offset + rel->r_offset));

	return _bfd_final_link_relocate (howto, input_bfd, input_section,
					 contents, rel->r_offset, value,
					 rel->r_addend);
      }
10819
10820 case R_ARM_TLS_LE32:
10821 if (bfd_link_dll (info))
10822 {
10823 (*_bfd_error_handler)
10824 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10825 input_bfd, input_section,
10826 (long) rel->r_offset, howto->name);
10827 return bfd_reloc_notsupported;
10828 }
10829 else
10830 value = tpoff (info, value);
10831
10832 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10833 contents, rel->r_offset, value,
10834 rel->r_addend);
10835
    case R_ARM_V4BX:
      /* Rewrite ARMv4T "BX Rm" for ARMv4 targets: either branch to an
	 interworking veneer (fix_v4bx == 2) or degrade to MOV PC,Rm.  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* PC bias of 8 for the ARM branch displacement.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits).  Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
10865
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      /* ARM MOVW/MOVT: patch a 16-bit immediate split across the
	 imm4:imm12 fields of the instruction.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* Reassemble the 16-bit immediate, then sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-_NC MOVW form checks for overflow.  */
	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high halfword.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
10908
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      /* Thumb-2 MOVW/MOVT: the 16-bit immediate is scattered across
	 imm4:i:imm3:imm8 of the 32-bit encoding.  */
      {
	bfd_vma insn;

	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Gather imm4:i:imm3:imm8, then sign-extend.  */
	    addend = ((insn >> 4)  & 0xf000)
		   | ((insn >> 15) & 0x0800)
		   | ((insn >> 4)  & 0x0700)
		   | (insn	   & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-_NC MOVW form checks for overflow.  */
	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants take the high halfword.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
10962
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* Group relocations for ADD/SUB immediates: each G_n consumes
	 another chunk of the offset in the instruction's
	 constant-with-rotation form.  Non-_NC variants demand that the
	 final group exactly absorbs the remaining residual.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.
		   NOTE(review): the rotate is performed in the full
		   bfd_vma width rather than modulo 32 bits — TODO
		   confirm this is intended on 64-bit hosts.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		(*_bfd_error_handler)
		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
		  input_bfd, input_section,
		  (long) rel->r_offset, howto->name);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11101
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* Group relocations for LDR/STR: the residual left after removing
	 groups G_0..G_{n-1} must fit the instruction's 12-bit unsigned
	 offset field, with the U bit giving the sign.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* Bit 23 is the U (add/subtract) bit.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    (*_bfd_error_handler)
	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
	       input_bfd, input_section,
	       (long) rel->r_offset, labs (signed_value), howto->name);
	    /* NOTE(review): labs takes a long; on hosts where
	       bfd_signed_vma is wider than long this may truncate the
	       value shown in the diagnostic — TODO confirm.  */
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
11186
11187 case R_ARM_LDRS_PC_G0:
11188 case R_ARM_LDRS_PC_G1:
11189 case R_ARM_LDRS_PC_G2:
11190 case R_ARM_LDRS_SB_G0:
11191 case R_ARM_LDRS_SB_G1:
11192 case R_ARM_LDRS_SB_G2:
11193 {
11194 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11195 bfd_vma pc = input_section->output_section->vma
11196 + input_section->output_offset + rel->r_offset;
11197 /* sb is the origin of the *segment* containing the symbol. */
11198 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11199 bfd_vma residual;
11200 bfd_signed_vma signed_value;
11201 int group = 0;
11202
11203 /* Determine which groups of bits to calculate. */
11204 switch (r_type)
11205 {
11206 case R_ARM_LDRS_PC_G0:
11207 case R_ARM_LDRS_SB_G0:
11208 group = 0;
11209 break;
11210
11211 case R_ARM_LDRS_PC_G1:
11212 case R_ARM_LDRS_SB_G1:
11213 group = 1;
11214 break;
11215
11216 case R_ARM_LDRS_PC_G2:
11217 case R_ARM_LDRS_SB_G2:
11218 group = 2;
11219 break;
11220
11221 default:
11222 abort ();
11223 }
11224
11225 /* If REL, extract the addend from the insn. If RELA, it will
11226 have already been fetched for us. */
11227 if (globals->use_rel)
11228 {
11229 int negative = (insn & (1 << 23)) ? 1 : -1;
11230 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11231 }
11232
11233 /* Compute the value (X) to go in the place. */
11234 if (r_type == R_ARM_LDRS_PC_G0
11235 || r_type == R_ARM_LDRS_PC_G1
11236 || r_type == R_ARM_LDRS_PC_G2)
11237 /* PC relative. */
11238 signed_value = value - pc + signed_addend;
11239 else
11240 /* Section base relative. */
11241 signed_value = value - sb + signed_addend;
11242
11243 /* Calculate the value of the relevant G_{n-1} to obtain
11244 the residual at that stage. */
11245 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11246 group - 1, &residual);
11247
11248 /* Check for overflow. */
11249 if (residual >= 0x100)
11250 {
11251 (*_bfd_error_handler)
11252 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11253 input_bfd, input_section,
11254 (long) rel->r_offset, labs (signed_value), howto->name);
11255 return bfd_reloc_overflow;
11256 }
11257
11258 /* Mask out the value and U bit. */
11259 insn &= 0xff7ff0f0;
11260
11261 /* Set the U bit if the value to go in the place is non-negative. */
11262 if (signed_value >= 0)
11263 insn |= 1 << 23;
11264
11265 /* Encode the offset. */
11266 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11267
11268 bfd_put_32 (input_bfd, insn, hit_data);
11269 }
11270 return bfd_reloc_ok;
11271
11272 case R_ARM_LDC_PC_G0:
11273 case R_ARM_LDC_PC_G1:
11274 case R_ARM_LDC_PC_G2:
11275 case R_ARM_LDC_SB_G0:
11276 case R_ARM_LDC_SB_G1:
11277 case R_ARM_LDC_SB_G2:
11278 {
11279 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11280 bfd_vma pc = input_section->output_section->vma
11281 + input_section->output_offset + rel->r_offset;
11282 /* sb is the origin of the *segment* containing the symbol. */
11283 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11284 bfd_vma residual;
11285 bfd_signed_vma signed_value;
11286 int group = 0;
11287
11288 /* Determine which groups of bits to calculate. */
11289 switch (r_type)
11290 {
11291 case R_ARM_LDC_PC_G0:
11292 case R_ARM_LDC_SB_G0:
11293 group = 0;
11294 break;
11295
11296 case R_ARM_LDC_PC_G1:
11297 case R_ARM_LDC_SB_G1:
11298 group = 1;
11299 break;
11300
11301 case R_ARM_LDC_PC_G2:
11302 case R_ARM_LDC_SB_G2:
11303 group = 2;
11304 break;
11305
11306 default:
11307 abort ();
11308 }
11309
11310 /* If REL, extract the addend from the insn. If RELA, it will
11311 have already been fetched for us. */
11312 if (globals->use_rel)
11313 {
11314 int negative = (insn & (1 << 23)) ? 1 : -1;
11315 signed_addend = negative * ((insn & 0xff) << 2);
11316 }
11317
11318 /* Compute the value (X) to go in the place. */
11319 if (r_type == R_ARM_LDC_PC_G0
11320 || r_type == R_ARM_LDC_PC_G1
11321 || r_type == R_ARM_LDC_PC_G2)
11322 /* PC relative. */
11323 signed_value = value - pc + signed_addend;
11324 else
11325 /* Section base relative. */
11326 signed_value = value - sb + signed_addend;
11327
11328 /* Calculate the value of the relevant G_{n-1} to obtain
11329 the residual at that stage. */
11330 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11331 group - 1, &residual);
11332
11333 /* Check for overflow. (The absolute value to go in the place must be
11334 divisible by four and, after having been divided by four, must
11335 fit in eight bits.) */
11336 if ((residual & 0x3) != 0 || residual >= 0x400)
11337 {
11338 (*_bfd_error_handler)
11339 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11340 input_bfd, input_section,
11341 (long) rel->r_offset, labs (signed_value), howto->name);
11342 return bfd_reloc_overflow;
11343 }
11344
11345 /* Mask out the value and U bit. */
11346 insn &= 0xff7fff00;
11347
11348 /* Set the U bit if the value to go in the place is non-negative. */
11349 if (signed_value >= 0)
11350 insn |= 1 << 23;
11351
11352 /* Encode the offset. */
11353 insn |= residual >> 2;
11354
11355 bfd_put_32 (input_bfd, insn, hit_data);
11356 }
11357 return bfd_reloc_ok;
11358
11359 case R_ARM_THM_ALU_ABS_G0_NC:
11360 case R_ARM_THM_ALU_ABS_G1_NC:
11361 case R_ARM_THM_ALU_ABS_G2_NC:
11362 case R_ARM_THM_ALU_ABS_G3_NC:
11363 {
11364 const int shift_array[4] = {0, 8, 16, 24};
11365 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11366 bfd_vma addr = value;
11367 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11368
11369 /* Compute address. */
11370 if (globals->use_rel)
11371 signed_addend = insn & 0xff;
11372 addr += signed_addend;
11373 if (branch_type == ST_BRANCH_TO_THUMB)
11374 addr |= 1;
11375 /* Clean imm8 insn. */
11376 insn &= 0xff00;
11377 /* And update with correct part of address. */
11378 insn |= (addr >> shift) & 0xff;
11379 /* Update insn. */
11380 bfd_put_16 (input_bfd, insn, hit_data);
11381 }
11382
11383 *unresolved_reloc_p = FALSE;
11384 return bfd_reloc_ok;
11385
11386 default:
11387 return bfd_reloc_notsupported;
11388 }
11389 }
11390
11391 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11392 static void
11393 arm_add_to_rel (bfd * abfd,
11394 bfd_byte * address,
11395 reloc_howto_type * howto,
11396 bfd_signed_vma increment)
11397 {
11398 bfd_signed_vma addend;
11399
11400 if (howto->type == R_ARM_THM_CALL
11401 || howto->type == R_ARM_THM_JUMP24)
11402 {
11403 int upper_insn, lower_insn;
11404 int upper, lower;
11405
11406 upper_insn = bfd_get_16 (abfd, address);
11407 lower_insn = bfd_get_16 (abfd, address + 2);
11408 upper = upper_insn & 0x7ff;
11409 lower = lower_insn & 0x7ff;
11410
11411 addend = (upper << 12) | (lower << 1);
11412 addend += increment;
11413 addend >>= 1;
11414
11415 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11416 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11417
11418 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11419 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11420 }
11421 else
11422 {
11423 bfd_vma contents;
11424
11425 contents = bfd_get_32 (abfd, address);
11426
11427 /* Get the (signed) value from the instruction. */
11428 addend = contents & howto->src_mask;
11429 if (addend & ((howto->src_mask + 1) >> 1))
11430 {
11431 bfd_signed_vma mask;
11432
11433 mask = -1;
11434 mask &= ~ howto->src_mask;
11435 addend |= mask;
11436 }
11437
11438 /* Add in the increment, (which is a byte value). */
11439 switch (howto->type)
11440 {
11441 default:
11442 addend += increment;
11443 break;
11444
11445 case R_ARM_PC24:
11446 case R_ARM_PLT32:
11447 case R_ARM_CALL:
11448 case R_ARM_JUMP24:
11449 addend <<= howto->size;
11450 addend += increment;
11451
11452 /* Should we check for overflow here ? */
11453
11454 /* Drop any undesired bits. */
11455 addend >>= howto->rightshift;
11456 break;
11457 }
11458
11459 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11460
11461 bfd_put_32 (abfd, contents, address);
11462 }
11463 }
11464
11465 #define IS_ARM_TLS_RELOC(R_TYPE) \
11466 ((R_TYPE) == R_ARM_TLS_GD32 \
11467 || (R_TYPE) == R_ARM_TLS_LDO32 \
11468 || (R_TYPE) == R_ARM_TLS_LDM32 \
11469 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
11470 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
11471 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
11472 || (R_TYPE) == R_ARM_TLS_LE32 \
11473 || (R_TYPE) == R_ARM_TLS_IE32 \
11474 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11475
11476 /* Specific set of relocations for the gnu tls dialect. */
11477 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
11478 ((R_TYPE) == R_ARM_TLS_GOTDESC \
11479 || (R_TYPE) == R_ARM_TLS_CALL \
11480 || (R_TYPE) == R_ARM_THM_TLS_CALL \
11481 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
11482 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11483
/* Relocate an ARM ELF section.

   Apply the relocations in RELOCS to the bytes of INPUT_SECTION (from
   INPUT_BFD), already loaded into CONTENTS.  LOCAL_SYMS and
   LOCAL_SECTIONS describe the input bfd's local symbols.  Returns
   FALSE on a hard error (unresolvable relocation or an unhandled
   SEC_MERGE case), TRUE otherwise.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);

      /* Vtable relocations only guide garbage collection; there is
	 nothing to apply to the section contents.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  /* REL link against a SEC_MERGE section: the addend
		     lives in the instruction, so extract it (per
		     relocation encoding) in order to recompute the
		     offset into the merged section.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Can only handle relocations whose field is
			 contiguous and unshifted here.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve via the linker hash table.  This may
	     set UNRESOLVED_RELOC for symbols from shared objects.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a TLS relocation used with a non-TLS symbol, and
	 vice versa (only for defined symbols).  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
11821
11822 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11823 adds the edit to the start of the list. (The list must be built in order of
11824 ascending TINDEX: the function's callers are primarily responsible for
11825 maintaining that condition). */
11826
11827 static void
11828 add_unwind_table_edit (arm_unwind_table_edit **head,
11829 arm_unwind_table_edit **tail,
11830 arm_unwind_edit_type type,
11831 asection *linked_section,
11832 unsigned int tindex)
11833 {
11834 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11835 xmalloc (sizeof (arm_unwind_table_edit));
11836
11837 new_edit->type = type;
11838 new_edit->linked_section = linked_section;
11839 new_edit->index = tindex;
11840
11841 if (tindex > 0)
11842 {
11843 new_edit->next = NULL;
11844
11845 if (*tail)
11846 (*tail)->next = new_edit;
11847
11848 (*tail) = new_edit;
11849
11850 if (!*head)
11851 (*head) = new_edit;
11852 }
11853 else
11854 {
11855 new_edit->next = *head;
11856
11857 if (!*tail)
11858 *tail = new_edit;
11859
11860 *head = new_edit;
11861 }
11862 }
11863
11864 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11865
11866 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11867 static void
11868 adjust_exidx_size(asection *exidx_sec, int adjust)
11869 {
11870 asection *out_sec;
11871
11872 if (!exidx_sec->rawsize)
11873 exidx_sec->rawsize = exidx_sec->size;
11874
11875 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11876 out_sec = exidx_sec->output_section;
11877 /* Adjust size of output section. */
11878 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11879 }
11880
11881 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11882 static void
11883 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11884 {
11885 struct _arm_elf_section_data *exidx_arm_data;
11886
11887 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11888 add_unwind_table_edit (
11889 &exidx_arm_data->u.exidx.unwind_edit_list,
11890 &exidx_arm_data->u.exidx.unwind_edit_tail,
11891 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11892
11893 exidx_arm_data->additional_reloc_count++;
11894
11895 adjust_exidx_size(exidx_sec, 8);
11896 }
11897
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   TEXT_SECTION_ORDER lists the NUM_TEXT_SECTIONS text sections in order
   of increasing VMA.  Always returns TRUE; errors while reading a table
   simply skip that section.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Unwind kind of the previous table entry: -1 none yet, 0 cantunwind,
     1 inlined opcodes, 2 normal table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table two words (one entry) at a time; the second word
	 of each entry determines the entry's kind.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
12082
12083 static bfd_boolean
12084 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12085 bfd *ibfd, const char *name)
12086 {
12087 asection *sec, *osec;
12088
12089 sec = bfd_get_linker_section (ibfd, name);
12090 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12091 return TRUE;
12092
12093 osec = sec->output_section;
12094 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12095 return TRUE;
12096
12097 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12098 sec->output_offset, sec->size))
12099 return FALSE;
12100
12101 return TRUE;
12102 }
12103
12104 static bfd_boolean
12105 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12106 {
12107 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12108 asection *sec, *osec;
12109
12110 if (globals == NULL)
12111 return FALSE;
12112
12113 /* Invoke the regular ELF backend linker to do all the work. */
12114 if (!bfd_elf_final_link (abfd, info))
12115 return FALSE;
12116
12117 /* Process stub sections (eg BE8 encoding, ...). */
12118 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12119 unsigned int i;
12120 for (i=0; i<htab->top_id; i++)
12121 {
12122 sec = htab->stub_group[i].stub_sec;
12123 /* Only process it once, in its link_sec slot. */
12124 if (sec && i == htab->stub_group[i].link_sec->id)
12125 {
12126 osec = sec->output_section;
12127 elf32_arm_write_section (abfd, info, sec, sec->contents);
12128 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12129 sec->output_offset, sec->size))
12130 return FALSE;
12131 }
12132 }
12133
12134 /* Write out any glue sections now that we have created all the
12135 stubs. */
12136 if (globals->bfd_of_glue_owner != NULL)
12137 {
12138 if (! elf32_arm_output_glue_section (info, abfd,
12139 globals->bfd_of_glue_owner,
12140 ARM2THUMB_GLUE_SECTION_NAME))
12141 return FALSE;
12142
12143 if (! elf32_arm_output_glue_section (info, abfd,
12144 globals->bfd_of_glue_owner,
12145 THUMB2ARM_GLUE_SECTION_NAME))
12146 return FALSE;
12147
12148 if (! elf32_arm_output_glue_section (info, abfd,
12149 globals->bfd_of_glue_owner,
12150 VFP11_ERRATUM_VENEER_SECTION_NAME))
12151 return FALSE;
12152
12153 if (! elf32_arm_output_glue_section (info, abfd,
12154 globals->bfd_of_glue_owner,
12155 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12156 return FALSE;
12157
12158 if (! elf32_arm_output_glue_section (info, abfd,
12159 globals->bfd_of_glue_owner,
12160 ARM_BX_GLUE_SECTION_NAME))
12161 return FALSE;
12162 }
12163
12164 return TRUE;
12165 }
12166
12167 /* Return a best guess for the machine number based on the attributes. */
12168
12169 static unsigned int
12170 bfd_arm_get_mach_from_attributes (bfd * abfd)
12171 {
12172 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12173
12174 switch (arch)
12175 {
12176 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12177 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12178 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12179
12180 case TAG_CPU_ARCH_V5TE:
12181 {
12182 char * name;
12183
12184 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12185 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12186
12187 if (name)
12188 {
12189 if (strcmp (name, "IWMMXT2") == 0)
12190 return bfd_mach_arm_iWMMXt2;
12191
12192 if (strcmp (name, "IWMMXT") == 0)
12193 return bfd_mach_arm_iWMMXt;
12194
12195 if (strcmp (name, "XSCALE") == 0)
12196 {
12197 int wmmx;
12198
12199 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12200 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12201 switch (wmmx)
12202 {
12203 case 1: return bfd_mach_arm_iWMMXt;
12204 case 2: return bfd_mach_arm_iWMMXt2;
12205 default: return bfd_mach_arm_XScale;
12206 }
12207 }
12208 }
12209
12210 return bfd_mach_arm_5TE;
12211 }
12212
12213 default:
12214 return bfd_mach_arm_unknown;
12215 }
12216 }
12217
12218 /* Set the right machine number. */
12219
12220 static bfd_boolean
12221 elf32_arm_object_p (bfd *abfd)
12222 {
12223 unsigned int mach;
12224
12225 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12226
12227 if (mach == bfd_mach_arm_unknown)
12228 {
12229 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12230 mach = bfd_mach_arm_ep9312;
12231 else
12232 mach = bfd_arm_get_mach_from_attributes (abfd);
12233 }
12234
12235 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12236 return TRUE;
12237 }
12238
12239 /* Function to keep ARM specific flags in the ELF header. */
12240
static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  /* Record ARM specific e_flags on ABFD.  If the flags have already
     been initialized to a *different* value, the stored flags are
     deliberately left unchanged; for legacy (pre-EABI) objects a
     warning about the interworking bit is issued.  Note that the
     `else' below belongs to the outer `if': when the flags are
     already initialized and identical, nothing happens at all.  */
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      /* Only legacy objects carry ABI information in e_flags; EABI
	 objects use build attributes instead, so no warning there.  */
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    (*_bfd_error_handler)
	      (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("Warning: Clearing the interworking flag of %B due to outside request"),
	       abfd);
	}
    }
  else
    {
      /* First time through (or flags unchanged): record them.  */
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}
12267
12268 /* Copy backend specific data from one object module to another. */
12269
12270 static bfd_boolean
12271 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12272 {
12273 flagword in_flags;
12274 flagword out_flags;
12275
12276 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12277 return TRUE;
12278
12279 in_flags = elf_elfheader (ibfd)->e_flags;
12280 out_flags = elf_elfheader (obfd)->e_flags;
12281
12282 if (elf_flags_init (obfd)
12283 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12284 && in_flags != out_flags)
12285 {
12286 /* Cannot mix APCS26 and APCS32 code. */
12287 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12288 return FALSE;
12289
12290 /* Cannot mix float APCS and non-float APCS code. */
12291 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12292 return FALSE;
12293
12294 /* If the src and dest have different interworking flags
12295 then turn off the interworking bit. */
12296 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12297 {
12298 if (out_flags & EF_ARM_INTERWORK)
12299 _bfd_error_handler
12300 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12301 obfd, ibfd);
12302
12303 in_flags &= ~EF_ARM_INTERWORK;
12304 }
12305
12306 /* Likewise for PIC, though don't warn for this case. */
12307 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12308 in_flags &= ~EF_ARM_PIC;
12309 }
12310
12311 elf_elfheader (obfd)->e_flags = in_flags;
12312 elf_flags_init (obfd) = TRUE;
12313
12314 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12315 }
12316
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary callee-saved register.  */
  AEABI_R9_SB,		/* R9 used as SB (static base).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by the code at all.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative (needs R9=SB).  */
  AEABI_PCS_RW_data_unused	/* The code uses no RW data.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* The code uses no enums.  */
  AEABI_enum_short,		/* Enums are container-sized (variable).  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums are 32 bits; compatible with either.  */
};
12343
12344 /* Determine whether an object attribute tag takes an integer, a
12345 string or both. */
12346
12347 static int
12348 elf32_arm_obj_attrs_arg_type (int tag)
12349 {
12350 if (tag == Tag_compatibility)
12351 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12352 else if (tag == Tag_nodefaults)
12353 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12354 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12355 return ATTR_TYPE_FLAG_STR_VAL;
12356 else if (tag < 32)
12357 return ATTR_TYPE_FLAG_INT_VAL;
12358 else
12359 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12360 }
12361
12362 /* The ABI defines that Tag_conformance should be emitted first, and that
12363 Tag_nodefaults should be second (if either is defined). This sets those
12364 two positions, and bumps up the position of all the remaining tags to
12365 compensate. */
static int
elf32_arm_obj_attrs_order (int num)
{
  /* The first two emitted slots are reserved for Tag_conformance and
     Tag_nodefaults, in that order, as the ABI requires.  */
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
    return Tag_conformance;
  if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
    return Tag_nodefaults;
  /* Tags numerically below Tag_nodefaults were displaced by the two
     reserved slots above; shift them back down by two.  */
  if ((num - 2) < Tag_nodefaults)
    return num - 2;
  /* Tags between Tag_nodefaults and Tag_conformance were displaced by
     one (only the Tag_conformance slot).  */
  if ((num - 1) < Tag_conformance)
    return num - 1;
  /* Tags above Tag_conformance keep their natural position.  */
  return num;
}
12379
12380 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12381 static bfd_boolean
12382 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12383 {
12384 if ((tag & 127) < 64)
12385 {
12386 _bfd_error_handler
12387 (_("%B: Unknown mandatory EABI object attribute %d"),
12388 abfd, tag);
12389 bfd_set_error (bfd_error_bad_value);
12390 return FALSE;
12391 }
12392 else
12393 {
12394 _bfd_error_handler
12395 (_("Warning: %B: Unknown EABI object attribute %d"),
12396 abfd, tag);
12397 return TRUE;
12398 }
12399 }
12400
12401 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12402 Returns -1 if no architecture could be read. */
12403
12404 static int
12405 get_secondary_compatible_arch (bfd *abfd)
12406 {
12407 obj_attribute *attr =
12408 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12409
12410 /* Note: the tag and its argument below are uleb128 values, though
12411 currently-defined values fit in one byte for each. */
12412 if (attr->s
12413 && attr->s[0] == Tag_CPU_arch
12414 && (attr->s[1] & 128) != 128
12415 && attr->s[2] == 0)
12416 return attr->s[1];
12417
12418 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12419 return -1;
12420 }
12421
12422 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12423 The tag is removed if ARCH is -1. */
12424
12425 static void
12426 set_secondary_compatible_arch (bfd *abfd, int arch)
12427 {
12428 obj_attribute *attr =
12429 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12430
12431 if (arch == -1)
12432 {
12433 attr->s = NULL;
12434 return;
12435 }
12436
12437 /* Note: the tag and its argument below are uleb128 values, though
12438 currently-defined values fit in one byte for each. */
12439 if (!attr->s)
12440 attr->s = (char *) bfd_alloc (abfd, 3);
12441 attr->s[0] = Tag_CPU_arch;
12442 attr->s[1] = arch;
12443 attr->s[2] = '\0';
12444 }
12445
12446 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12447 into account. */
12448
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each table below gives the combined architecture for a pair of
     tags.  The table is selected by the HIGHER of the two tags and is
     indexed by the LOWER tag; -1 marks an incompatible pair.
     Architectures up to and including V6KZ form a simple superset
     chain and need no table (see the early return below).  */
  const int v6t2[] =
    {
      T(V6T2),	/* PRE_V4.  */
      T(V6T2),	/* V4.  */
      T(V6T2),	/* V4T.  */
      T(V6T2),	/* V5T.  */
      T(V6T2),	/* V5TE.  */
      T(V6T2),	/* V5TEJ.  */
      T(V6T2),	/* V6.  */
      T(V7),	/* V6KZ.  */
      T(V6T2)	/* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),	/* PRE_V4.  */
      T(V6K),	/* V4.  */
      T(V6K),	/* V4T.  */
      T(V6K),	/* V5T.  */
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6K),	/* V6.  */
      T(V6KZ),	/* V6KZ.  */
      T(V7),	/* V6T2.  */
      T(V6K)	/* V6K.  */
    };
  const int v7[] =
    {
      T(V7),	/* PRE_V4.  */
      T(V7),	/* V4.  */
      T(V7),	/* V4T.  */
      T(V7),	/* V5T.  */
      T(V7),	/* V5TE.  */
      T(V7),	/* V5TEJ.  */
      T(V7),	/* V6.  */
      T(V7),	/* V6KZ.  */
      T(V7),	/* V6T2.  */
      T(V7),	/* V6K.  */
      T(V7)	/* V7.  */
    };
  const int v6_m[] =
    {
      -1,	/* PRE_V4.  */
      -1,	/* V4.  */
      T(V6K),	/* V4T.  */
      T(V6K),	/* V5T.  */
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6K),	/* V6.  */
      T(V6KZ),	/* V6KZ.  */
      T(V7),	/* V6T2.  */
      T(V6K),	/* V6K.  */
      T(V7),	/* V7.  */
      T(V6_M)	/* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,	/* PRE_V4.  */
      -1,	/* V4.  */
      T(V6K),	/* V4T.  */
      T(V6K),	/* V5T.  */
      T(V6K),	/* V5TE.  */
      T(V6K),	/* V5TEJ.  */
      T(V6K),	/* V6.  */
      T(V6KZ),	/* V6KZ.  */
      T(V7),	/* V6T2.  */
      T(V6K),	/* V6K.  */
      T(V7),	/* V7.  */
      T(V6S_M),	/* V6_M.  */
      T(V6S_M)	/* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,	/* PRE_V4.  */
      -1,	/* V4.  */
      T(V7E_M),	/* V4T.  */
      T(V7E_M),	/* V5T.  */
      T(V7E_M),	/* V5TE.  */
      T(V7E_M),	/* V5TEJ.  */
      T(V7E_M),	/* V6.  */
      T(V7E_M),	/* V6KZ.  */
      T(V7E_M),	/* V6T2.  */
      T(V7E_M),	/* V6K.  */
      T(V7E_M),	/* V7.  */
      T(V7E_M),	/* V6_M.  */
      T(V7E_M),	/* V6S_M.  */
      T(V7E_M)	/* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),	/* PRE_V4.  */
      T(V8),	/* V4.  */
      T(V8),	/* V4T.  */
      T(V8),	/* V5T.  */
      T(V8),	/* V5TE.  */
      T(V8),	/* V5TEJ.  */
      T(V8),	/* V6.  */
      T(V8),	/* V6KZ.  */
      T(V8),	/* V6T2.  */
      T(V8),	/* V6K.  */
      T(V8),	/* V7.  */
      T(V8),	/* V6_M.  */
      T(V8),	/* V6S_M.  */
      T(V8),	/* V7E_M.  */
      T(V8)	/* V8.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* Unused tag value 15.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* Unused tag value 15.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  /* Pseudo-architecture for the V4T + V6-M "also compatible with"
     combination; never stored in an output tag directly.  */
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* Unused.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Table of tables: indexed by (higher tag - V6T2).  The NULL slot
     corresponds to the unused tag value 15.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      NULL,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
12689
12690 /* Query attributes object to see if integer divide instructions may be
12691 present in an object. */
12692 static bfd_boolean
12693 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12694 {
12695 int arch = attr[Tag_CPU_arch].i;
12696 int profile = attr[Tag_CPU_arch_profile].i;
12697
12698 switch (attr[Tag_DIV_use].i)
12699 {
12700 case 0:
12701 /* Integer divide allowed if instruction contained in archetecture. */
12702 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12703 return TRUE;
12704 else if (arch >= TAG_CPU_ARCH_V7E_M)
12705 return TRUE;
12706 else
12707 return FALSE;
12708
12709 case 1:
12710 /* Integer divide explicitly prohibited. */
12711 return FALSE;
12712
12713 default:
12714 /* Unrecognised case - treat as allowing divide everywhere. */
12715 case 2:
12716 /* Integer divide allowed in ARM state. */
12717 return TRUE;
12718 }
12719 }
12720
12721 /* Query attributes object to see if integer divide instructions are
12722 forbidden to be in the object. This is not the inverse of
12723 elf32_arm_attributes_accept_div. */
12724 static bfd_boolean
12725 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12726 {
12727 return attr[Tag_DIV_use].i == 1;
12728 }
12729
12730 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12731 are conflicting attributes. */
12732
12733 static bfd_boolean
12734 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12735 {
12736 obj_attribute *in_attr;
12737 obj_attribute *out_attr;
12738 /* Some tags have 0 = don't care, 1 = strong requirement,
12739 2 = weak requirement. */
12740 static const int order_021[3] = {0, 2, 1};
12741 int i;
12742 bfd_boolean result = TRUE;
12743 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12744
12745 /* Skip the linker stubs file. This preserves previous behavior
12746 of accepting unknown attributes in the first input file - but
12747 is that a bug? */
12748 if (ibfd->flags & BFD_LINKER_CREATED)
12749 return TRUE;
12750
12751 /* Skip any input that hasn't attribute section.
12752 This enables to link object files without attribute section with
12753 any others. */
12754 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12755 return TRUE;
12756
12757 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12758 {
12759 /* This is the first object. Copy the attributes. */
12760 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12761
12762 out_attr = elf_known_obj_attributes_proc (obfd);
12763
12764 /* Use the Tag_null value to indicate the attributes have been
12765 initialized. */
12766 out_attr[0].i = 1;
12767
12768 /* We do not output objects with Tag_MPextension_use_legacy - we move
12769 the attribute's value to Tag_MPextension_use. */
12770 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12771 {
12772 if (out_attr[Tag_MPextension_use].i != 0
12773 && out_attr[Tag_MPextension_use_legacy].i
12774 != out_attr[Tag_MPextension_use].i)
12775 {
12776 _bfd_error_handler
12777 (_("Error: %B has both the current and legacy "
12778 "Tag_MPextension_use attributes"), ibfd);
12779 result = FALSE;
12780 }
12781
12782 out_attr[Tag_MPextension_use] =
12783 out_attr[Tag_MPextension_use_legacy];
12784 out_attr[Tag_MPextension_use_legacy].type = 0;
12785 out_attr[Tag_MPextension_use_legacy].i = 0;
12786 }
12787
12788 return result;
12789 }
12790
12791 in_attr = elf_known_obj_attributes_proc (ibfd);
12792 out_attr = elf_known_obj_attributes_proc (obfd);
12793 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12794 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12795 {
12796 /* Ignore mismatches if the object doesn't use floating point or is
12797 floating point ABI independent. */
12798 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12799 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12800 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12801 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12802 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12803 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12804 {
12805 _bfd_error_handler
12806 (_("error: %B uses VFP register arguments, %B does not"),
12807 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12808 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12809 result = FALSE;
12810 }
12811 }
12812
12813 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12814 {
12815 /* Merge this attribute with existing attributes. */
12816 switch (i)
12817 {
12818 case Tag_CPU_raw_name:
12819 case Tag_CPU_name:
12820 /* These are merged after Tag_CPU_arch. */
12821 break;
12822
12823 case Tag_ABI_optimization_goals:
12824 case Tag_ABI_FP_optimization_goals:
12825 /* Use the first value seen. */
12826 break;
12827
12828 case Tag_CPU_arch:
12829 {
12830 int secondary_compat = -1, secondary_compat_out = -1;
12831 unsigned int saved_out_attr = out_attr[i].i;
12832 int arch_attr;
12833 static const char *name_table[] =
12834 {
12835 /* These aren't real CPU names, but we can't guess
12836 that from the architecture version alone. */
12837 "Pre v4",
12838 "ARM v4",
12839 "ARM v4T",
12840 "ARM v5T",
12841 "ARM v5TE",
12842 "ARM v5TEJ",
12843 "ARM v6",
12844 "ARM v6KZ",
12845 "ARM v6T2",
12846 "ARM v6K",
12847 "ARM v7",
12848 "ARM v6-M",
12849 "ARM v6S-M",
12850 "ARM v8",
12851 "",
12852 "ARM v8-M.baseline",
12853 "ARM v8-M.mainline",
12854 };
12855
12856 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12857 secondary_compat = get_secondary_compatible_arch (ibfd);
12858 secondary_compat_out = get_secondary_compatible_arch (obfd);
12859 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12860 &secondary_compat_out,
12861 in_attr[i].i,
12862 secondary_compat);
12863
12864 /* Return with error if failed to merge. */
12865 if (arch_attr == -1)
12866 return FALSE;
12867
12868 out_attr[i].i = arch_attr;
12869
12870 set_secondary_compatible_arch (obfd, secondary_compat_out);
12871
12872 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12873 if (out_attr[i].i == saved_out_attr)
12874 ; /* Leave the names alone. */
12875 else if (out_attr[i].i == in_attr[i].i)
12876 {
12877 /* The output architecture has been changed to match the
12878 input architecture. Use the input names. */
12879 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12880 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12881 : NULL;
12882 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12883 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12884 : NULL;
12885 }
12886 else
12887 {
12888 out_attr[Tag_CPU_name].s = NULL;
12889 out_attr[Tag_CPU_raw_name].s = NULL;
12890 }
12891
12892 /* If we still don't have a value for Tag_CPU_name,
12893 make one up now. Tag_CPU_raw_name remains blank. */
12894 if (out_attr[Tag_CPU_name].s == NULL
12895 && out_attr[i].i < ARRAY_SIZE (name_table))
12896 out_attr[Tag_CPU_name].s =
12897 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12898 }
12899 break;
12900
12901 case Tag_ARM_ISA_use:
12902 case Tag_THUMB_ISA_use:
12903 case Tag_WMMX_arch:
12904 case Tag_Advanced_SIMD_arch:
12905 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12906 case Tag_ABI_FP_rounding:
12907 case Tag_ABI_FP_exceptions:
12908 case Tag_ABI_FP_user_exceptions:
12909 case Tag_ABI_FP_number_model:
12910 case Tag_FP_HP_extension:
12911 case Tag_CPU_unaligned_access:
12912 case Tag_T2EE_use:
12913 case Tag_MPextension_use:
12914 /* Use the largest value specified. */
12915 if (in_attr[i].i > out_attr[i].i)
12916 out_attr[i].i = in_attr[i].i;
12917 break;
12918
12919 case Tag_ABI_align_preserved:
12920 case Tag_ABI_PCS_RO_data:
12921 /* Use the smallest value specified. */
12922 if (in_attr[i].i < out_attr[i].i)
12923 out_attr[i].i = in_attr[i].i;
12924 break;
12925
12926 case Tag_ABI_align_needed:
12927 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12928 && (in_attr[Tag_ABI_align_preserved].i == 0
12929 || out_attr[Tag_ABI_align_preserved].i == 0))
12930 {
12931 /* This error message should be enabled once all non-conformant
12932 binaries in the toolchain have had the attributes set
12933 properly.
12934 _bfd_error_handler
12935 (_("error: %B: 8-byte data alignment conflicts with %B"),
12936 obfd, ibfd);
12937 result = FALSE; */
12938 }
12939 /* Fall through. */
12940 case Tag_ABI_FP_denormal:
12941 case Tag_ABI_PCS_GOT_use:
12942 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12943 value if greater than 2 (for future-proofing). */
12944 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12945 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12946 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12947 out_attr[i].i = in_attr[i].i;
12948 break;
12949
12950 case Tag_Virtualization_use:
12951 /* The virtualization tag effectively stores two bits of
12952 information: the intended use of TrustZone (in bit 0), and the
12953 intended use of Virtualization (in bit 1). */
12954 if (out_attr[i].i == 0)
12955 out_attr[i].i = in_attr[i].i;
12956 else if (in_attr[i].i != 0
12957 && in_attr[i].i != out_attr[i].i)
12958 {
12959 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12960 out_attr[i].i = 3;
12961 else
12962 {
12963 _bfd_error_handler
12964 (_("error: %B: unable to merge virtualization attributes "
12965 "with %B"),
12966 obfd, ibfd);
12967 result = FALSE;
12968 }
12969 }
12970 break;
12971
12972 case Tag_CPU_arch_profile:
12973 if (out_attr[i].i != in_attr[i].i)
12974 {
12975 /* 0 will merge with anything.
12976 'A' and 'S' merge to 'A'.
12977 'R' and 'S' merge to 'R'.
12978 'M' and 'A|R|S' is an error. */
12979 if (out_attr[i].i == 0
12980 || (out_attr[i].i == 'S'
12981 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12982 out_attr[i].i = in_attr[i].i;
12983 else if (in_attr[i].i == 0
12984 || (in_attr[i].i == 'S'
12985 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12986 ; /* Do nothing. */
12987 else
12988 {
12989 _bfd_error_handler
12990 (_("error: %B: Conflicting architecture profiles %c/%c"),
12991 ibfd,
12992 in_attr[i].i ? in_attr[i].i : '0',
12993 out_attr[i].i ? out_attr[i].i : '0');
12994 result = FALSE;
12995 }
12996 }
12997 break;
12998
12999 case Tag_DSP_extension:
13000 /* No need to change output value if any of:
13001 - pre (<=) ARMv5T input architecture (do not have DSP)
13002 - M input profile not ARMv7E-M and do not have DSP. */
13003 if (in_attr[Tag_CPU_arch].i <= 3
13004 || (in_attr[Tag_CPU_arch_profile].i == 'M'
13005 && in_attr[Tag_CPU_arch].i != 13
13006 && in_attr[i].i == 0))
13007 ; /* Do nothing. */
13008 /* Output value should be 0 if DSP part of architecture, ie.
13009 - post (>=) ARMv5te architecture output
13010 - A, R or S profile output or ARMv7E-M output architecture. */
13011 else if (out_attr[Tag_CPU_arch].i >= 4
13012 && (out_attr[Tag_CPU_arch_profile].i == 'A'
13013 || out_attr[Tag_CPU_arch_profile].i == 'R'
13014 || out_attr[Tag_CPU_arch_profile].i == 'S'
13015 || out_attr[Tag_CPU_arch].i == 13))
13016 out_attr[i].i = 0;
13017 /* Otherwise, DSP instructions are added and not part of output
13018 architecture. */
13019 else
13020 out_attr[i].i = 1;
13021 break;
13022
13023 case Tag_FP_arch:
13024 {
13025 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
13026 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
13027 when it's 0. It might mean absence of FP hardware if
13028 Tag_FP_arch is zero. */
13029
13030 #define VFP_VERSION_COUNT 9
13031 static const struct
13032 {
13033 int ver;
13034 int regs;
13035 } vfp_versions[VFP_VERSION_COUNT] =
13036 {
13037 {0, 0},
13038 {1, 16},
13039 {2, 16},
13040 {3, 32},
13041 {3, 16},
13042 {4, 32},
13043 {4, 16},
13044 {8, 32},
13045 {8, 16}
13046 };
13047 int ver;
13048 int regs;
13049 int newval;
13050
13051 /* If the output has no requirement about FP hardware,
13052 follow the requirement of the input. */
13053 if (out_attr[i].i == 0)
13054 {
13055 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13056 out_attr[i].i = in_attr[i].i;
13057 out_attr[Tag_ABI_HardFP_use].i
13058 = in_attr[Tag_ABI_HardFP_use].i;
13059 break;
13060 }
13061 /* If the input has no requirement about FP hardware, do
13062 nothing. */
13063 else if (in_attr[i].i == 0)
13064 {
13065 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13066 break;
13067 }
13068
13069 /* Both the input and the output have nonzero Tag_FP_arch.
13070 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13071
13072 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13073 do nothing. */
13074 if (in_attr[Tag_ABI_HardFP_use].i == 0
13075 && out_attr[Tag_ABI_HardFP_use].i == 0)
13076 ;
13077 /* If the input and the output have different Tag_ABI_HardFP_use,
13078 the combination of them is 0 (implied by Tag_FP_arch). */
13079 else if (in_attr[Tag_ABI_HardFP_use].i
13080 != out_attr[Tag_ABI_HardFP_use].i)
13081 out_attr[Tag_ABI_HardFP_use].i = 0;
13082
13083 /* Now we can handle Tag_FP_arch. */
13084
13085 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13086 pick the biggest. */
13087 if (in_attr[i].i >= VFP_VERSION_COUNT
13088 && in_attr[i].i > out_attr[i].i)
13089 {
13090 out_attr[i] = in_attr[i];
13091 break;
13092 }
13093 /* The output uses the superset of input features
13094 (ISA version) and registers. */
13095 ver = vfp_versions[in_attr[i].i].ver;
13096 if (ver < vfp_versions[out_attr[i].i].ver)
13097 ver = vfp_versions[out_attr[i].i].ver;
13098 regs = vfp_versions[in_attr[i].i].regs;
13099 if (regs < vfp_versions[out_attr[i].i].regs)
13100 regs = vfp_versions[out_attr[i].i].regs;
13101 /* This assumes all possible supersets are also a valid
13102 options. */
13103 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13104 {
13105 if (regs == vfp_versions[newval].regs
13106 && ver == vfp_versions[newval].ver)
13107 break;
13108 }
13109 out_attr[i].i = newval;
13110 }
13111 break;
13112 case Tag_PCS_config:
13113 if (out_attr[i].i == 0)
13114 out_attr[i].i = in_attr[i].i;
13115 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13116 {
13117 /* It's sometimes ok to mix different configs, so this is only
13118 a warning. */
13119 _bfd_error_handler
13120 (_("Warning: %B: Conflicting platform configuration"), ibfd);
13121 }
13122 break;
13123 case Tag_ABI_PCS_R9_use:
13124 if (in_attr[i].i != out_attr[i].i
13125 && out_attr[i].i != AEABI_R9_unused
13126 && in_attr[i].i != AEABI_R9_unused)
13127 {
13128 _bfd_error_handler
13129 (_("error: %B: Conflicting use of R9"), ibfd);
13130 result = FALSE;
13131 }
13132 if (out_attr[i].i == AEABI_R9_unused)
13133 out_attr[i].i = in_attr[i].i;
13134 break;
13135 case Tag_ABI_PCS_RW_data:
13136 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13137 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13138 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13139 {
13140 _bfd_error_handler
13141 (_("error: %B: SB relative addressing conflicts with use of R9"),
13142 ibfd);
13143 result = FALSE;
13144 }
13145 /* Use the smallest value specified. */
13146 if (in_attr[i].i < out_attr[i].i)
13147 out_attr[i].i = in_attr[i].i;
13148 break;
13149 case Tag_ABI_PCS_wchar_t:
13150 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13151 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13152 {
13153 _bfd_error_handler
13154 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13155 ibfd, in_attr[i].i, out_attr[i].i);
13156 }
13157 else if (in_attr[i].i && !out_attr[i].i)
13158 out_attr[i].i = in_attr[i].i;
13159 break;
13160 case Tag_ABI_enum_size:
13161 if (in_attr[i].i != AEABI_enum_unused)
13162 {
13163 if (out_attr[i].i == AEABI_enum_unused
13164 || out_attr[i].i == AEABI_enum_forced_wide)
13165 {
13166 /* The existing object is compatible with anything.
13167 Use whatever requirements the new object has. */
13168 out_attr[i].i = in_attr[i].i;
13169 }
13170 else if (in_attr[i].i != AEABI_enum_forced_wide
13171 && out_attr[i].i != in_attr[i].i
13172 && !elf_arm_tdata (obfd)->no_enum_size_warning)
13173 {
13174 static const char *aeabi_enum_names[] =
13175 { "", "variable-size", "32-bit", "" };
13176 const char *in_name =
13177 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13178 ? aeabi_enum_names[in_attr[i].i]
13179 : "<unknown>";
13180 const char *out_name =
13181 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13182 ? aeabi_enum_names[out_attr[i].i]
13183 : "<unknown>";
13184 _bfd_error_handler
13185 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13186 ibfd, in_name, out_name);
13187 }
13188 }
13189 break;
13190 case Tag_ABI_VFP_args:
13191 /* Aready done. */
13192 break;
13193 case Tag_ABI_WMMX_args:
13194 if (in_attr[i].i != out_attr[i].i)
13195 {
13196 _bfd_error_handler
13197 (_("error: %B uses iWMMXt register arguments, %B does not"),
13198 ibfd, obfd);
13199 result = FALSE;
13200 }
13201 break;
13202 case Tag_compatibility:
13203 /* Merged in target-independent code. */
13204 break;
13205 case Tag_ABI_HardFP_use:
13206 /* This is handled along with Tag_FP_arch. */
13207 break;
13208 case Tag_ABI_FP_16bit_format:
13209 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13210 {
13211 if (in_attr[i].i != out_attr[i].i)
13212 {
13213 _bfd_error_handler
13214 (_("error: fp16 format mismatch between %B and %B"),
13215 ibfd, obfd);
13216 result = FALSE;
13217 }
13218 }
13219 if (in_attr[i].i != 0)
13220 out_attr[i].i = in_attr[i].i;
13221 break;
13222
13223 case Tag_DIV_use:
13224 /* A value of zero on input means that the divide instruction may
13225 be used if available in the base architecture as specified via
13226 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13227 the user did not want divide instructions. A value of 2
13228 explicitly means that divide instructions were allowed in ARM
13229 and Thumb state. */
13230 if (in_attr[i].i == out_attr[i].i)
13231 /* Do nothing. */ ;
13232 else if (elf32_arm_attributes_forbid_div (in_attr)
13233 && !elf32_arm_attributes_accept_div (out_attr))
13234 out_attr[i].i = 1;
13235 else if (elf32_arm_attributes_forbid_div (out_attr)
13236 && elf32_arm_attributes_accept_div (in_attr))
13237 out_attr[i].i = in_attr[i].i;
13238 else if (in_attr[i].i == 2)
13239 out_attr[i].i = in_attr[i].i;
13240 break;
13241
13242 case Tag_MPextension_use_legacy:
13243 /* We don't output objects with Tag_MPextension_use_legacy - we
13244 move the value to Tag_MPextension_use. */
13245 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13246 {
13247 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13248 {
13249 _bfd_error_handler
13250 (_("%B has has both the current and legacy "
13251 "Tag_MPextension_use attributes"),
13252 ibfd);
13253 result = FALSE;
13254 }
13255 }
13256
13257 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13258 out_attr[Tag_MPextension_use] = in_attr[i];
13259
13260 break;
13261
13262 case Tag_nodefaults:
13263 /* This tag is set if it exists, but the value is unused (and is
13264 typically zero). We don't actually need to do anything here -
13265 the merge happens automatically when the type flags are merged
13266 below. */
13267 break;
13268 case Tag_also_compatible_with:
13269 /* Already done in Tag_CPU_arch. */
13270 break;
13271 case Tag_conformance:
13272 /* Keep the attribute if it matches. Throw it away otherwise.
13273 No attribute means no claim to conform. */
13274 if (!in_attr[i].s || !out_attr[i].s
13275 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13276 out_attr[i].s = NULL;
13277 break;
13278
13279 default:
13280 result
13281 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13282 }
13283
13284 /* If out_attr was copied from in_attr then it won't have a type yet. */
13285 if (in_attr[i].type && !out_attr[i].type)
13286 out_attr[i].type = in_attr[i].type;
13287 }
13288
13289 /* Merge Tag_compatibility attributes and any common GNU ones. */
13290 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13291 return FALSE;
13292
13293 /* Check for any attributes not known on ARM. */
13294 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13295
13296 return result;
13297 }
13298
13299
/* Return TRUE if the two EABI versions are compatible, i.e. objects
   using them may safely be linked together.  (Note: this is a
   compatibility predicate, not an incompatibility one — TRUE means
   the versions may be mixed.)  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  /* Otherwise the two versions must match exactly.  */
  return (iver == over);
}
13313
13314 /* Merge backend specific data from an object file to the output
13315 object file when linking. */
13316
13317 static bfd_boolean
13318 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13319
/* Display the flags field.  Decodes the ARM-specific e_flags word of
   ABFD's ELF header to the FILE passed in PTR, after printing the
   generic ELF private data.  Which flag bits are meaningful depends on
   the EABI version encoded in the flags themselves.  Always returns
   TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the bits it has decoded from FLAGS; any
     bits still set at the end are reported as unrecognised.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* V4 shares the BE8/LE8 decoding with V5 below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13459
13460 static int
13461 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13462 {
13463 switch (ELF_ST_TYPE (elf_sym->st_info))
13464 {
13465 case STT_ARM_TFUNC:
13466 return ELF_ST_TYPE (elf_sym->st_info);
13467
13468 case STT_ARM_16BIT:
13469 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13470 This allows us to distinguish between data used by Thumb instructions
13471 and non-data (which is probably code) inside Thumb regions of an
13472 executable. */
13473 if (type != STT_OBJECT && type != STT_TLS)
13474 return ELF_ST_TYPE (elf_sym->st_info);
13475 break;
13476
13477 default:
13478 break;
13479 }
13480
13481 return type;
13482 }
13483
13484 static asection *
13485 elf32_arm_gc_mark_hook (asection *sec,
13486 struct bfd_link_info *info,
13487 Elf_Internal_Rela *rel,
13488 struct elf_link_hash_entry *h,
13489 Elf_Internal_Sym *sym)
13490 {
13491 if (h != NULL)
13492 switch (ELF32_R_TYPE (rel->r_info))
13493 {
13494 case R_ARM_GNU_VTINHERIT:
13495 case R_ARM_GNU_VTENTRY:
13496 return NULL;
13497 }
13498
13499 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13500 }
13501
/* Update the got entry reference counts for the section being removed.
   This is the inverse of elf32_arm_check_relocs: for every relocation
   in SEC (which garbage collection has decided to discard) the GOT,
   PLT and dynamic-relocation reference counts that check_relocs bumped
   are decremented again.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      /* Resolve the symbol the relocation refers to, following any
	 indirect/warning links to the real hash entry.  */
      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      /* These classification flags mirror the ones computed in
	 elf32_arm_check_relocs, so each count is undone exactly once.  */
      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Drop a GOT reference, for either a global or a local
	     symbol; never let the count go negative.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* R_ARM_ABS12 is only treated like the absolute relocations
	     below on VxWorks (where it is used dynamically).  */
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      /* Undo the PLT reference counts taken in check_relocs.  */
      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      /* Remove SEC's entry from the symbol's dynamic-relocation list.  */
      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13693
/* Look through the relocs for a section during the first phase.
   Classifies each relocation in SEC of ABFD and updates the GOT/PLT
   reference counts and dynamic-relocation lists that later sizing
   passes (and gc_sweep_hook, which mirrors this logic) rely on.
   Returns FALSE on error.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      /* Look up the symbol: ISYM for a local symbol, H for a global,
	 following indirect/warning links to the real hash entry.  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    /* An IE-model access from a shared object needs the
	       static TLS block; record that for the dynamic linker.  */
	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be relocated at load time,
	     so they are unusable in shared objects.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	  /* This relocation describes the C++ object vtable hierarchy.
	     Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	  /* This relocation describes which C++ vtable entries are actually
	     used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* Update PLT reference counting for global symbols and local
	 ifuncs; gc_sweep_hook undoes these counts symmetrically.  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* The list head holds the most recently touched section's
	     entry, so this lookup is usually O(1).  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
14101
14102 /* Unwinding tables are not referenced directly. This pass marks them as
14103 required if the corresponding code section is marked. */
14104
14105 static bfd_boolean
14106 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14107 elf_gc_mark_hook_fn gc_mark_hook)
14108 {
14109 bfd *sub;
14110 Elf_Internal_Shdr **elf_shdrp;
14111 bfd_boolean again;
14112
14113 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14114
14115 /* Marking EH data may cause additional code sections to be marked,
14116 requiring multiple passes. */
14117 again = TRUE;
14118 while (again)
14119 {
14120 again = FALSE;
14121 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14122 {
14123 asection *o;
14124
14125 if (! is_arm_elf (sub))
14126 continue;
14127
14128 elf_shdrp = elf_elfsections (sub);
14129 for (o = sub->sections; o != NULL; o = o->next)
14130 {
14131 Elf_Internal_Shdr *hdr;
14132
14133 hdr = &elf_section_data (o)->this_hdr;
14134 if (hdr->sh_type == SHT_ARM_EXIDX
14135 && hdr->sh_link
14136 && hdr->sh_link < elf_numsections (sub)
14137 && !o->gc_mark
14138 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14139 {
14140 again = TRUE;
14141 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14142 return FALSE;
14143 }
14144 }
14145 }
14146 }
14147
14148 return TRUE;
14149 }
14150
14151 /* Treat mapping symbols as special target symbols. */
14152
14153 static bfd_boolean
14154 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14155 {
14156 return bfd_is_arm_special_symbol_name (sym->name,
14157 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14158 }
14159
14160 /* This is a copy of elf_find_function() from elf.c except that
14161 ARM mapping symbols are ignored when looking for function names
14162 and STT_ARM_TFUNC is considered to a function type. */
14163
14164 static bfd_boolean
14165 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
14166 asymbol ** symbols,
14167 asection * section,
14168 bfd_vma offset,
14169 const char ** filename_ptr,
14170 const char ** functionname_ptr)
14171 {
14172 const char * filename = NULL;
14173 asymbol * func = NULL;
14174 bfd_vma low_func = 0;
14175 asymbol ** p;
14176
14177 for (p = symbols; *p != NULL; p++)
14178 {
14179 elf_symbol_type *q;
14180
14181 q = (elf_symbol_type *) *p;
14182
14183 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14184 {
14185 default:
14186 break;
14187 case STT_FILE:
14188 filename = bfd_asymbol_name (&q->symbol);
14189 break;
14190 case STT_FUNC:
14191 case STT_ARM_TFUNC:
14192 case STT_NOTYPE:
14193 /* Skip mapping symbols. */
14194 if ((q->symbol.flags & BSF_LOCAL)
14195 && bfd_is_arm_special_symbol_name (q->symbol.name,
14196 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14197 continue;
14198 /* Fall through. */
14199 if (bfd_get_section (&q->symbol) == section
14200 && q->symbol.value >= low_func
14201 && q->symbol.value <= offset)
14202 {
14203 func = (asymbol *) q;
14204 low_func = q->symbol.value;
14205 }
14206 break;
14207 }
14208 }
14209
14210 if (func == NULL)
14211 return FALSE;
14212
14213 if (filename_ptr)
14214 *filename_ptr = filename;
14215 if (functionname_ptr)
14216 *functionname_ptr = bfd_asymbol_name (func);
14217
14218 return TRUE;
14219 }
14220
14221
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  Tries DWARF2 first, then stabs,
   then a plain symbol-table scan; DWARF1 is deliberately skipped.
   Returns TRUE when any location information was found.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 gave a location but no function name; fill it in from
	 the symbol table.  Only let arm_elf_find_function overwrite
	 the filename when DWARF2 did not already provide one.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: scan the symbol table for the enclosing function.  */
  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  /* No line information is available from a bare symbol scan.  */
  *line_ptr = 0;
  return TRUE;
}
14274
14275 static bfd_boolean
14276 elf32_arm_find_inliner_info (bfd * abfd,
14277 const char ** filename_ptr,
14278 const char ** functionname_ptr,
14279 unsigned int * line_ptr)
14280 {
14281 bfd_boolean found;
14282 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14283 functionname_ptr, line_ptr,
14284 & elf_tdata (abfd)->dwarf2_find_line_info);
14285 return found;
14286 }
14287
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.

   INFO is the overall link information and H the symbol being
   adjusted.  Returns TRUE on success, FALSE if the link hash table is
   not an ARM one.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.
	     Clear all the PLT bookkeeping, including the ARM-specific
	     Thumb/non-call reference counts.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Reserve room for the symbol in .dynbss and align the section;
     the generic ELF code does the actual bookkeeping.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
14418
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.

   Called via elf_link_hash_traverse from
   elf32_arm_size_dynamic_sections; INF is the struct bfd_link_info.
   Returning FALSE stops the traversal.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are resolved through their real definition;
     nothing to allocate for them.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1
	  && !h->forced_local)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* Symbian OS uses its own GOT allocation scheme; skip the
	 generic sizing below for it.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
		     the symbol is both GD and GDESC, got.offset may
		     have been overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32 needs one GOT slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to relocate against, or
	     0 when the GOT entry can be resolved at link time.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_pic (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* Relocations in vxworks .tls_vars sections are handled
	     specially by the loader, so drop them here.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1
	      && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;
      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
14789
14790 /* Find any dynamic relocs that apply to read-only sections. */
14791
14792 static bfd_boolean
14793 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14794 {
14795 struct elf32_arm_link_hash_entry * eh;
14796 struct elf_dyn_relocs * p;
14797
14798 eh = (struct elf32_arm_link_hash_entry *) h;
14799 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14800 {
14801 asection *s = p->sec;
14802
14803 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14804 {
14805 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14806
14807 info->flags |= DF_TEXTREL;
14808
14809 /* Not an error, just cut short the traversal. */
14810 return FALSE;
14811 }
14812 }
14813 return TRUE;
14814 }
14815
14816 void
14817 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14818 int byteswap_code)
14819 {
14820 struct elf32_arm_link_hash_table *globals;
14821
14822 globals = elf32_arm_hash_table (info);
14823 if (globals == NULL)
14824 return;
14825
14826 globals->byteswap_code = byteswap_code;
14827 }
14828
/* Set the sizes of the dynamic sections.

   Walks all input bfds to size local GOT/PLT/iPLT entries and their
   dynamic relocs, traverses the global hash table for global symbols,
   allocates section contents, and adds the DT_* entries needed in
   .dynamic.  Returns FALSE on allocation failure or if the hash table
   is not an ARM one.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First account for dynamic relocs recorded against local
	 symbols in each input section.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in parallel, one entry per
	 local symbol.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15225
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.

   Defines _TLS_MODULE_BASE_ at offset 0 of the TLS segment and hides
   it, so TLS relocations can be computed relative to it.  Does nothing
   for relocatable links or when there is no TLS section.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;

  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* NOTE(review): the type is set on the looked-up entry before
	     TLSBASE is repointed at BH; presumably both refer to the
	     same hash entry — confirm if changing this sequence.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }
  return TRUE;
}
15268
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.

   Fills in H's PLT entry (if any), emits its R_ARM_COPY reloc (if
   needed), and fixes up the Elf_Internal_Sym SYM that will be written
   to the output symbol table.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* Regular .plt entries are filled in here; .iplt entries are
	     populated elsewhere since they need no dynamic symbol.  */
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15357
15358 static void
15359 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15360 void *contents,
15361 const unsigned long *template, unsigned count)
15362 {
15363 unsigned ix;
15364
15365 for (ix = 0; ix != count; ix++)
15366 {
15367 unsigned long insn = template[ix];
15368
15369 /* Emit mov pc,rx if bx is not permitted. */
15370 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15371 insn = (insn & 0xf000000f) | 0x01a0f000;
15372 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15373 }
15374 }
15375
15376 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15377 other variants, NaCl needs this entry in a static executable's
15378 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15379 zero. For .iplt really only the last bundle is useful, and .iplt
15380 could have a shorter first entry, with each individual PLT entry's
15381 relative branch calculated differently so it targets the last
15382 bundle instead of the instruction before it (labelled .Lplt_tail
15383 above). But it's simpler to keep the size and layout of PLT0
15384 consistent with the dynamic case, at the cost of some dead code at
15385 the start of .iplt and the one dead store to the stack at the start
15386 of .Lplt_tail. */
15387 static void
15388 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15389 asection *plt, bfd_vma got_displacement)
15390 {
15391 unsigned int i;
15392
15393 put_arm_insn (htab, output_bfd,
15394 elf32_arm_nacl_plt0_entry[0]
15395 | arm_movw_immediate (got_displacement),
15396 plt->contents + 0);
15397 put_arm_insn (htab, output_bfd,
15398 elf32_arm_nacl_plt0_entry[1]
15399 | arm_movt_immediate (got_displacement),
15400 plt->contents + 4);
15401
15402 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15403 put_arm_insn (htab, output_bfd,
15404 elf32_arm_nacl_plt0_entry[i],
15405 plt->contents + (i * 4));
15406 }
15407
/* Finish up the dynamic sections.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every .dynamic entry, filling in values that were not
	 known until final section layout.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* These tags only need rewriting on BPABI (Symbian),
		 where they must hold file offsets, not addresses.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  (*_bfd_error_handler)
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	      if (!htab->symbian_p)
		{
		  /* My reading of the SVR4 ABI indicates that the
		     procedure linkage table relocs (DT_JMPREL) should be
		     included in the overall relocs (DT_REL).  This is
		     what Solaris does.  However, UnixWare can not handle
		     that case.  Therefore, we override the DT_RELSZ entry
		     here to make it not include the JMPREL relocs.  Since
		     the linker script arranges for .rel(a).plt to follow all
		     other relocation sections, we don't have to worry
		     about changing the DT_REL entry.  */
		  s = htab->root.srelplt;
		  if (s != NULL)
		    dyn.d_un.d_val -= s->size;
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		  break;
		}
	      /* Fall through.  */

	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relcoation section, since relocations sections are
		 never allocated under the BPABI.  The comments above
		 about Unixware notwithstanding, we include all of the
		 relocations here.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum sizes for the SZ tags, or find the lowest file
		     offset for DT_REL/DT_RELA.  The initial value 0
		     wraps to the unsigned maximum when 1 is subtracted,
		     so the first matching section is always taken.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      /* Point at the TLS descriptor lazy trampoline in .plt.  */
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      /* Point at the GOT slot reserved for TLS descriptors.  */
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the lazy TLS-descriptor resolution trampoline...  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* ...and patch its two trailing data words (at +24 and +28)
	     with PC-relative displacements to the GOT slots it uses.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has a pair of relocations: one against the
	     GOT symbol and one against the PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  return TRUE;
}
15781
15782 static void
15783 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15784 {
15785 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15786 struct elf32_arm_link_hash_table *globals;
15787 struct elf_segment_map *m;
15788
15789 i_ehdrp = elf_elfheader (abfd);
15790
15791 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15792 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15793 else
15794 _bfd_elf_post_process_headers (abfd, link_info);
15795 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15796
15797 if (link_info)
15798 {
15799 globals = elf32_arm_hash_table (link_info);
15800 if (globals != NULL && globals->byteswap_code)
15801 i_ehdrp->e_flags |= EF_ARM_BE8;
15802 }
15803
15804 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15805 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15806 {
15807 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15808 if (abi == AEABI_VFP_args_vfp)
15809 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15810 else
15811 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15812 }
15813
15814 /* Scan segment to set p_flags attribute if it contains only sections with
15815 SHF_ARM_PURECODE flag. */
15816 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15817 {
15818 unsigned int j;
15819
15820 if (m->count == 0)
15821 continue;
15822 for (j = 0; j < m->count; j++)
15823 {
15824 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
15825 break;
15826 }
15827 if (j == m->count)
15828 {
15829 m->p_flags = PF_X;
15830 m->p_flags_valid = 1;
15831 }
15832 }
15833 }
15834
15835 static enum elf_reloc_type_class
15836 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15837 const asection *rel_sec ATTRIBUTE_UNUSED,
15838 const Elf_Internal_Rela *rela)
15839 {
15840 switch ((int) ELF32_R_TYPE (rela->r_info))
15841 {
15842 case R_ARM_RELATIVE:
15843 return reloc_class_relative;
15844 case R_ARM_JUMP_SLOT:
15845 return reloc_class_plt;
15846 case R_ARM_COPY:
15847 return reloc_class_copy;
15848 case R_ARM_IRELATIVE:
15849 return reloc_class_ifunc;
15850 default:
15851 return reloc_class_normal;
15852 }
15853 }
15854
/* Final write hook: bring any ARM_NOTE_SECTION note sections in ABFD
   up to date before the file is written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15860
15861 /* Return TRUE if this is an unwinding table entry. */
15862
15863 static bfd_boolean
15864 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15865 {
15866 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15867 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15868 }
15869
15870
15871 /* Set the type and flags for an ARM section. We do this by
15872 the section name, which is a hack, but ought to work. */
15873
15874 static bfd_boolean
15875 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15876 {
15877 const char * name;
15878
15879 name = bfd_get_section_name (abfd, sec);
15880
15881 if (is_arm_elf_unwind_section_name (abfd, name))
15882 {
15883 hdr->sh_type = SHT_ARM_EXIDX;
15884 hdr->sh_flags |= SHF_LINK_ORDER;
15885 }
15886
15887 if (sec->flags & SEC_ELF_PURECODE)
15888 hdr->sh_flags |= SHF_ARM_PURECODE;
15889
15890 return TRUE;
15891 }
15892
15893 /* Handle an ARM specific section when reading an object file. This is
15894 called when bfd_section_from_shdr finds a section with an unknown
15895 type. */
15896
15897 static bfd_boolean
15898 elf32_arm_section_from_shdr (bfd *abfd,
15899 Elf_Internal_Shdr * hdr,
15900 const char *name,
15901 int shindex)
15902 {
15903 /* There ought to be a place to keep ELF backend specific flags, but
15904 at the moment there isn't one. We just keep track of the
15905 sections by their name, instead. Fortunately, the ABI gives
15906 names for all the ARM specific sections, so we will probably get
15907 away with this. */
15908 switch (hdr->sh_type)
15909 {
15910 case SHT_ARM_EXIDX:
15911 case SHT_ARM_PREEMPTMAP:
15912 case SHT_ARM_ATTRIBUTES:
15913 break;
15914
15915 default:
15916 return FALSE;
15917 }
15918
15919 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15920 return FALSE;
15921
15922 return TRUE;
15923 }
15924
15925 static _arm_elf_section_data *
15926 get_arm_elf_section_data (asection * sec)
15927 {
15928 if (sec && sec->owner && is_arm_elf (sec->owner))
15929 return elf32_arm_section_data (sec);
15930 else
15931 return NULL;
15932 }
15933
/* State carried around while emitting linker-generated symbols
   (mapping symbols and stub symbols) through the output callback.  */
typedef struct
{
  void *flaginfo;		/* Opaque first argument passed to FUNC.  */
  struct bfd_link_info *info;	/* Current link information.  */
  asection *sec;		/* Section the symbol is being placed in.  */
  int sec_shndx;		/* Output section index corresponding to SEC.  */
  /* Callback used to emit each symbol; expected to return 1 on
     success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15943
/* The three kinds of ARM mapping symbol; indexes into the "$a"/"$t"/"$d"
   name table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,		/* $a: ARM instructions.  */
  ARM_MAP_THUMB,	/* $t: Thumb instructions.  */
  ARM_MAP_DATA		/* $d: data.  */
};
15950
15951
15952 /* Output a single mapping symbol. */
15953
15954 static bfd_boolean
15955 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15956 enum map_symbol_type type,
15957 bfd_vma offset)
15958 {
15959 static const char *names[3] = {"$a", "$t", "$d"};
15960 Elf_Internal_Sym sym;
15961
15962 sym.st_value = osi->sec->output_section->vma
15963 + osi->sec->output_offset
15964 + offset;
15965 sym.st_size = 0;
15966 sym.st_other = 0;
15967 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15968 sym.st_shndx = osi->sec_shndx;
15969 sym.st_target_internal = 0;
15970 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15971 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15972 }
15973
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* Nothing to do if no PLT entry was allocated.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Pick the section holding the entry; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Clear the low bit of the recorded offset to get the entry's byte
     offset within the section.  */
  addr = root_plt->offset & -2;
  /* The mapping symbol layout depends on the per-target PLT entry
     format.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      /* Entries reached from Thumb code get a $t symbol on the stub
	 that precedes the entry proper.  */
      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
16064
16065 /* Output mapping symbols for PLT entries associated with H. */
16066
16067 static bfd_boolean
16068 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16069 {
16070 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16071 struct elf32_arm_link_hash_entry *eh;
16072
16073 if (h->root.type == bfd_link_hash_indirect)
16074 return TRUE;
16075
16076 if (h->root.type == bfd_link_hash_warning)
16077 /* When warning symbols are created, they **replace** the "real"
16078 entry in the hash table, thus we never get to see the real
16079 symbol in a hash traversal. So look at it now. */
16080 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16081
16082 eh = (struct elf32_arm_link_hash_entry *) h;
16083 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16084 &h->plt, &eh->plt);
16085 }
16086
/* Bind a veneered symbol to its veneer identified by its hash entry
   STUB_ENTRY.  The veneered location thus loses its symbol.  */

static void
arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
{
  struct elf32_arm_link_hash_entry *hash = stub_entry->h;

  BFD_ASSERT (hash);
  /* Redirect the symbol's definition to the stub's section, offset and
     size so references resolve to the veneer.  */
  hash->root.root.u.def.section = stub_entry->stub_sec;
  hash->root.root.u.def.value = stub_entry->stub_offset;
  hash->root.size = stub_entry->stub_size;
}
16100
16101 /* Output a single local symbol for a generated stub. */
16102
16103 static bfd_boolean
16104 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16105 bfd_vma offset, bfd_vma size)
16106 {
16107 Elf_Internal_Sym sym;
16108
16109 sym.st_value = osi->sec->output_section->vma
16110 + osi->sec->output_offset
16111 + offset;
16112 sym.st_size = size;
16113 sym.st_other = 0;
16114 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16115 sym.st_shndx = osi->sec_shndx;
16116 sym.st_target_internal = 0;
16117 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16118 }
16119
/* Hash traversal callback: emit the local symbol and mapping symbols
   for one long-branch stub, provided it lives in the section currently
   being processed (OSI->sec).  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub.  Thumb entry points get
	 bit 0 set in the symbol value.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol wherever the
     element type changes from the previous one.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
16227
16228 /* Output mapping symbols for linker generated sections,
16229 and for those data-only sections that do not have a
16230 $d. */
16231
16232 static bfd_boolean
16233 elf32_arm_output_arch_local_syms (bfd *output_bfd,
16234 struct bfd_link_info *info,
16235 void *flaginfo,
16236 int (*func) (void *, const char *,
16237 Elf_Internal_Sym *,
16238 asection *,
16239 struct elf_link_hash_entry *))
16240 {
16241 output_arch_syminfo osi;
16242 struct elf32_arm_link_hash_table *htab;
16243 bfd_vma offset;
16244 bfd_size_type size;
16245 bfd *input_bfd;
16246
16247 htab = elf32_arm_hash_table (info);
16248 if (htab == NULL)
16249 return FALSE;
16250
16251 check_use_blx (htab);
16252
16253 osi.flaginfo = flaginfo;
16254 osi.info = info;
16255 osi.func = func;
16256
16257 /* Add a $d mapping symbol to data-only sections that
16258 don't have any mapping symbol. This may result in (harmless) redundant
16259 mapping symbols. */
16260 for (input_bfd = info->input_bfds;
16261 input_bfd != NULL;
16262 input_bfd = input_bfd->link.next)
16263 {
16264 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
16265 for (osi.sec = input_bfd->sections;
16266 osi.sec != NULL;
16267 osi.sec = osi.sec->next)
16268 {
16269 if (osi.sec->output_section != NULL
16270 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
16271 != 0)
16272 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
16273 == SEC_HAS_CONTENTS
16274 && get_arm_elf_section_data (osi.sec) != NULL
16275 && get_arm_elf_section_data (osi.sec)->mapcount == 0
16276 && osi.sec->size > 0
16277 && (osi.sec->flags & SEC_EXCLUDE) == 0)
16278 {
16279 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16280 (output_bfd, osi.sec->output_section);
16281 if (osi.sec_shndx != (int)SHN_BAD)
16282 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
16283 }
16284 }
16285 }
16286
16287 /* ARM->Thumb glue. */
16288 if (htab->arm_glue_size > 0)
16289 {
16290 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16291 ARM2THUMB_GLUE_SECTION_NAME);
16292
16293 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16294 (output_bfd, osi.sec->output_section);
16295 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
16296 || htab->pic_veneer)
16297 size = ARM2THUMB_PIC_GLUE_SIZE;
16298 else if (htab->use_blx)
16299 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
16300 else
16301 size = ARM2THUMB_STATIC_GLUE_SIZE;
16302
16303 for (offset = 0; offset < htab->arm_glue_size; offset += size)
16304 {
16305 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
16306 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
16307 }
16308 }
16309
16310 /* Thumb->ARM glue. */
16311 if (htab->thumb_glue_size > 0)
16312 {
16313 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16314 THUMB2ARM_GLUE_SECTION_NAME);
16315
16316 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16317 (output_bfd, osi.sec->output_section);
16318 size = THUMB2ARM_GLUE_SIZE;
16319
16320 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
16321 {
16322 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
16323 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
16324 }
16325 }
16326
16327 /* ARMv4 BX veneers. */
16328 if (htab->bx_glue_size > 0)
16329 {
16330 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16331 ARM_BX_GLUE_SECTION_NAME);
16332
16333 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16334 (output_bfd, osi.sec->output_section);
16335
16336 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
16337 }
16338
16339 /* Long calls stubs. */
16340 if (htab->stub_bfd && htab->stub_bfd->sections)
16341 {
16342 asection* stub_sec;
16343
16344 for (stub_sec = htab->stub_bfd->sections;
16345 stub_sec != NULL;
16346 stub_sec = stub_sec->next)
16347 {
16348 /* Ignore non-stub sections. */
16349 if (!strstr (stub_sec->name, STUB_SUFFIX))
16350 continue;
16351
16352 osi.sec = stub_sec;
16353
16354 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16355 (output_bfd, osi.sec->output_section);
16356
16357 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
16358 }
16359 }
16360
16361 /* Finally, output mapping symbols for the PLT. */
16362 if (htab->root.splt && htab->root.splt->size > 0)
16363 {
16364 osi.sec = htab->root.splt;
16365 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16366 (output_bfd, osi.sec->output_section));
16367
16368 /* Output mapping symbols for the plt header. SymbianOS does not have a
16369 plt header. */
16370 if (htab->vxworks_p)
16371 {
16372 /* VxWorks shared libraries have no PLT header. */
16373 if (!bfd_link_pic (info))
16374 {
16375 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16376 return FALSE;
16377 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16378 return FALSE;
16379 }
16380 }
16381 else if (htab->nacl_p)
16382 {
16383 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16384 return FALSE;
16385 }
16386 else if (using_thumb_only (htab))
16387 {
16388 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
16389 return FALSE;
16390 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16391 return FALSE;
16392 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
16393 return FALSE;
16394 }
16395 else if (!htab->symbian_p)
16396 {
16397 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16398 return FALSE;
16399 #ifndef FOUR_WORD_PLT
16400 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
16401 return FALSE;
16402 #endif
16403 }
16404 }
16405 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
16406 {
16407 /* NaCl uses a special first entry in .iplt too. */
16408 osi.sec = htab->root.iplt;
16409 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16410 (output_bfd, osi.sec->output_section));
16411 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16412 return FALSE;
16413 }
16414 if ((htab->root.splt && htab->root.splt->size > 0)
16415 || (htab->root.iplt && htab->root.iplt->size > 0))
16416 {
16417 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
16418 for (input_bfd = info->input_bfds;
16419 input_bfd != NULL;
16420 input_bfd = input_bfd->link.next)
16421 {
16422 struct arm_local_iplt_info **local_iplt;
16423 unsigned int i, num_syms;
16424
16425 local_iplt = elf32_arm_local_iplt (input_bfd);
16426 if (local_iplt != NULL)
16427 {
16428 num_syms = elf_symtab_hdr (input_bfd).sh_info;
16429 for (i = 0; i < num_syms; i++)
16430 if (local_iplt[i] != NULL
16431 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
16432 &local_iplt[i]->root,
16433 &local_iplt[i]->arm))
16434 return FALSE;
16435 }
16436 }
16437 }
16438 if (htab->dt_tlsdesc_plt != 0)
16439 {
16440 /* Mapping symbols for the lazy tls trampoline. */
16441 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
16442 return FALSE;
16443
16444 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16445 htab->dt_tlsdesc_plt + 24))
16446 return FALSE;
16447 }
16448 if (htab->tls_trampoline != 0)
16449 {
16450 /* Mapping symbols for the tls trampoline. */
16451 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
16452 return FALSE;
16453 #ifdef FOUR_WORD_PLT
16454 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16455 htab->tls_trampoline + 12))
16456 return FALSE;
16457 #endif
16458 }
16459
16460 return TRUE;
16461 }
16462
16463 /* Allocate target specific section data. */
16464
16465 static bfd_boolean
16466 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16467 {
16468 if (!sec->used_by_bfd)
16469 {
16470 _arm_elf_section_data *sdata;
16471 bfd_size_type amt = sizeof (*sdata);
16472
16473 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16474 if (sdata == NULL)
16475 return FALSE;
16476 sec->used_by_bfd = sdata;
16477 }
16478
16479 return _bfd_elf_new_section_hook (abfd, sec);
16480 }
16481
16482
16483 /* Used to order a list of mapping symbols by address. */
16484
16485 static int
16486 elf32_arm_compare_mapping (const void * a, const void * b)
16487 {
16488 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16489 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16490
16491 if (amap->vma > bmap->vma)
16492 return 1;
16493 else if (amap->vma < bmap->vma)
16494 return -1;
16495 else if (amap->type > bmap->type)
16496 /* Ensure results do not depend on the host qsort for objects with
16497 multiple mapping symbols at the same address by sorting on type
16498 after vma. */
16499 return 1;
16500 else if (amap->type < bmap->type)
16501 return -1;
16502 else
16503 return 0;
16504 }
16505
16506 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16507
16508 static unsigned long
16509 offset_prel31 (unsigned long addr, bfd_vma offset)
16510 {
16511 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16512 }
16513
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  Reads the two-word entry at FROM and writes the
   adjusted entry at TO.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
16535
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* In-memory contents that the branch instructions are written into.  */
  bfd_byte *contents;
};
16543
16544
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse; GEN_ENTRY
   is an elf32_arm_stub_hash_entry and IN_ARG an a8_branch_to_stub_data.
   Returns FALSE (stopping the traversal) on unrecoverable errors.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections and any stub type below the A8
     veneer range (i.e. non-A8-erratum stubs).  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX offsets are computed from a word-aligned base (Align(PC,4) in
     the ARM ARM), hence the masking for that stub type.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the skeleton encoding of the 32-bit Thumb-2 branch that
     replaces the veneered instruction; the offset fields are filled in
     at the jump24 label below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B (unconditional).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX.  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL.  */

      jump24:
	/* +/-16MB reach of a Thumb-2 24-bit branch.  */
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Store the branch as two halfwords, most-significant halfword first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16652
16653 /* Beginning of stm32l4xx work-around. */
16654
16655 /* Functions encoding instructions necessary for the emission of the
16656 fix-stm32l4xx-629360.
16657 Encoding is extracted from the
16658 ARM (C) Architecture Reference Manual
16659 ARMv7-A and ARMv7-R edition
16660 ARM DDI 0406C.b (ID072512). */
16661
16662 static inline bfd_vma
16663 create_instruction_branch_absolute (int branch_offset)
16664 {
16665 /* A8.8.18 B (A8-334)
16666 B target_address (Encoding T4). */
16667 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16668 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16669 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16670
16671 int s = ((branch_offset & 0x1000000) >> 24);
16672 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16673 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16674
16675 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16676 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16677
16678 bfd_vma patched_inst = 0xf0009000
16679 | s << 26 /* S. */
16680 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16681 | j1 << 13 /* J1. */
16682 | j2 << 11 /* J2. */
16683 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16684
16685 return patched_inst;
16686 }
16687
16688 static inline bfd_vma
16689 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16690 {
16691 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16692 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16693 bfd_vma patched_inst = 0xe8900000
16694 | (/*W=*/wback << 21)
16695 | (base_reg << 16)
16696 | (reg_mask & 0x0000ffff);
16697
16698 return patched_inst;
16699 }
16700
16701 static inline bfd_vma
16702 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16703 {
16704 /* A8.8.60 LDMDB/LDMEA (A8-402)
16705 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16706 bfd_vma patched_inst = 0xe9100000
16707 | (/*W=*/wback << 21)
16708 | (base_reg << 16)
16709 | (reg_mask & 0x0000ffff);
16710
16711 return patched_inst;
16712 }
16713
16714 static inline bfd_vma
16715 create_instruction_mov (int target_reg, int source_reg)
16716 {
16717 /* A8.8.103 MOV (register) (A8-486)
16718 MOV Rd, Rm (Encoding T1). */
16719 bfd_vma patched_inst = 0x4600
16720 | (target_reg & 0x7)
16721 | ((target_reg & 0x8) >> 3) << 7
16722 | (source_reg << 3);
16723
16724 return patched_inst;
16725 }
16726
16727 static inline bfd_vma
16728 create_instruction_sub (int target_reg, int source_reg, int value)
16729 {
16730 /* A8.8.221 SUB (immediate) (A8-708)
16731 SUB Rd, Rn, #value (Encoding T3). */
16732 bfd_vma patched_inst = 0xf1a00000
16733 | (target_reg << 8)
16734 | (source_reg << 16)
16735 | (/*S=*/0 << 20)
16736 | ((value & 0x800) >> 11) << 26
16737 | ((value & 0x700) >> 8) << 12
16738 | (value & 0x0ff);
16739
16740 return patched_inst;
16741 }
16742
16743 static inline bfd_vma
16744 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16745 int first_reg)
16746 {
16747 /* A8.8.332 VLDM (A8-922)
16748 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16749 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16750 | (/*W=*/wback << 21)
16751 | (base_reg << 16)
16752 | (num_words & 0x000000ff)
16753 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16754 | (first_reg & 0x00000001) << 22;
16755
16756 return patched_inst;
16757 }
16758
16759 static inline bfd_vma
16760 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16761 int first_reg)
16762 {
16763 /* A8.8.332 VLDM (A8-922)
16764 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16765 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16766 | (base_reg << 16)
16767 | (num_words & 0x000000ff)
16768 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16769 | (first_reg & 0x00000001) << 22;
16770
16771 return patched_inst;
16772 }
16773
16774 static inline bfd_vma
16775 create_instruction_udf_w (int value)
16776 {
16777 /* A8.8.247 UDF (A8-758)
16778 Undefined (Encoding T2). */
16779 bfd_vma patched_inst = 0xf7f0a000
16780 | (value & 0x00000fff)
16781 | (value & 0x000f0000) << 16;
16782
16783 return patched_inst;
16784 }
16785
16786 static inline bfd_vma
16787 create_instruction_udf (int value)
16788 {
16789 /* A8.8.247 UDF (A8-758)
16790 Undefined (Encoding T1). */
16791 bfd_vma patched_inst = 0xde00
16792 | (value & 0xff);
16793
16794 return patched_inst;
16795 }
16796
/* Functions writing an instruction in memory, returning the next
   memory position to write to.  */

/* Write the 32-bit Thumb-2 instruction INSN at PT via put_thumb2_insn
   and return the address just past it.  */

static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
16807
/* Write the 16-bit Thumb instruction INSN at PT via put_thumb_insn
   and return the address just past it.  */

static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
16815
16816 /* Function filling up a region in memory with T1 and T2 UDFs taking
16817 care of alignment. */
16818
16819 static bfd_byte *
16820 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16821 bfd * output_bfd,
16822 const bfd_byte * const base_stub_contents,
16823 bfd_byte * const from_stub_contents,
16824 const bfd_byte * const end_stub_contents)
16825 {
16826 bfd_byte *current_stub_contents = from_stub_contents;
16827
16828 /* Fill the remaining of the stub with deterministic contents : UDF
16829 instructions.
16830 Check if realignment is needed on modulo 4 frontier using T1, to
16831 further use T2. */
16832 if ((current_stub_contents < end_stub_contents)
16833 && !((current_stub_contents - base_stub_contents) % 2)
16834 && ((current_stub_contents - base_stub_contents) % 4))
16835 current_stub_contents =
16836 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16837 create_instruction_udf (0));
16838
16839 for (; current_stub_contents < end_stub_contents;)
16840 current_stub_contents =
16841 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16842 create_instruction_udf_w (0));
16843
16844 return current_stub_contents;
16845 }
16846
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

/* Emit into BASE_STUB_CONTENTS the veneer replacing INITIAL_INSN, a
   Thumb-2 LDMIA found at INITIAL_INSN_ADDR.  Wide loads (more than 8
   registers) are split into two narrower LDMs (low then high register
   halves) to avoid the STM32L4xx erratum; unless the load restores PC,
   the veneer ends with a branch back to the instruction after the
   original.  The stub is padded up to
   STM32L4XX_ERRATUM_LDM_VENEER_SIZE with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions guaranteed by the caller/encoding:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
	This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With write-back the base register advances by itself, so the
	 two halves can simply be loaded in sequence from Rn.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16994
/* Emit into BASE_STUB_CONTENTS the veneer replacing INITIAL_INSN, a
   Thumb-2 LDMDB found at INITIAL_INSN_ADDR.  As for the LDMIA case,
   wide loads are split into two narrower LDMs; the decrement-before
   addressing additionally requires the high register half to be loaded
   before the low half, and PC-restoring variants are rewritten as a SUB
   of the base followed by LDMIAs.  Unless the load restores PC, the
   veneer ends with a branch back to the instruction after the original.
   The stub is padded up to STM32L4XX_ERRATUM_LDM_VENEER_SIZE with
   UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the preconditions guaranteed by the caller/encoding:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
	This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
	This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function, one case per combination of the wback,
     restore_pc and restore_rn properties of the original insn.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
17240
/* Build the veneer that replaces a Thumb-2 VLDM instruction hit by the
   STM32L4xx erratum.  The offending INITIAL_INSN (located at
   INITIAL_INSN_ADDR) is rewritten into BASE_STUB_CONTENTS as a sequence
   of VLDM instructions that each transfer at most 8 words, followed by
   any base-register compensation and a branch back to the instruction
   after the original one.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Bits 7:0 of the encoding hold imm8, the number of words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the P/U/W bits (bits 24:21 after
	 the shift, masked to ignore the D bit).  */
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Rn lives in bits 19:16.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Every chunk but the last transfers a full 8 words; the
		 final chunk takes the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17361
17362 static void
17363 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17364 bfd * output_bfd,
17365 const insn32 wrong_insn,
17366 const bfd_byte *const wrong_insn_addr,
17367 bfd_byte *const stub_contents)
17368 {
17369 if (is_thumb2_ldmia (wrong_insn))
17370 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17371 wrong_insn, wrong_insn_addr,
17372 stub_contents);
17373 else if (is_thumb2_ldmdb (wrong_insn))
17374 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17375 wrong_insn, wrong_insn_addr,
17376 stub_contents);
17377 else if (is_thumb2_vldm (wrong_insn))
17378 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17379 wrong_insn, wrong_insn_addr,
17380 stub_contents);
17381 }
17382
17383 /* End of stm32l4xx work-around. */
17384
17385
17386 static void
17387 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17388 asection *output_sec, Elf_Internal_Rela *rel)
17389 {
17390 BFD_ASSERT (output_sec && rel);
17391 struct bfd_elf_section_reloc_data *output_reldata;
17392 struct elf32_arm_link_hash_table *htab;
17393 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17394 Elf_Internal_Shdr *rel_hdr;
17395
17396
17397 if (oesd->rel.hdr)
17398 {
17399 rel_hdr = oesd->rel.hdr;
17400 output_reldata = &(oesd->rel);
17401 }
17402 else if (oesd->rela.hdr)
17403 {
17404 rel_hdr = oesd->rela.hdr;
17405 output_reldata = &(oesd->rela);
17406 }
17407 else
17408 {
17409 abort ();
17410 }
17411
17412 bfd_byte *erel = rel_hdr->contents;
17413 erel += output_reldata->count * rel_hdr->sh_entsize;
17414 htab = elf32_arm_hash_table (info);
17415 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17416 output_reldata->count++;
17417 }
17418
17419 /* Do code byteswapping. Return FALSE afterwards so that the section is
17420 written out as normal. */
17421
17422 static bfd_boolean
17423 elf32_arm_write_section (bfd *output_bfd,
17424 struct bfd_link_info *link_info,
17425 asection *sec,
17426 bfd_byte *contents)
17427 {
17428 unsigned int mapcount, errcount;
17429 _arm_elf_section_data *arm_data;
17430 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17431 elf32_arm_section_map *map;
17432 elf32_vfp11_erratum_list *errnode;
17433 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17434 bfd_vma ptr;
17435 bfd_vma end;
17436 bfd_vma offset = sec->output_section->vma + sec->output_offset;
17437 bfd_byte tmp;
17438 unsigned int i;
17439
17440 if (globals == NULL)
17441 return FALSE;
17442
17443 /* If this section has not been allocated an _arm_elf_section_data
17444 structure then we cannot record anything. */
17445 arm_data = get_arm_elf_section_data (sec);
17446 if (arm_data == NULL)
17447 return FALSE;
17448
17449 mapcount = arm_data->mapcount;
17450 map = arm_data->map;
17451 errcount = arm_data->erratumcount;
17452
17453 if (errcount != 0)
17454 {
17455 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17456
17457 for (errnode = arm_data->erratumlist; errnode != 0;
17458 errnode = errnode->next)
17459 {
17460 bfd_vma target = errnode->vma - offset;
17461
17462 switch (errnode->type)
17463 {
17464 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17465 {
17466 bfd_vma branch_to_veneer;
17467 /* Original condition code of instruction, plus bit mask for
17468 ARM B instruction. */
17469 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17470 | 0x0a000000;
17471
17472 /* The instruction is before the label. */
17473 target -= 4;
17474
17475 /* Above offset included in -4 below. */
17476 branch_to_veneer = errnode->u.b.veneer->vma
17477 - errnode->vma - 4;
17478
17479 if ((signed) branch_to_veneer < -(1 << 25)
17480 || (signed) branch_to_veneer >= (1 << 25))
17481 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17482 "range"), output_bfd);
17483
17484 insn |= (branch_to_veneer >> 2) & 0xffffff;
17485 contents[endianflip ^ target] = insn & 0xff;
17486 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17487 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17488 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17489 }
17490 break;
17491
17492 case VFP11_ERRATUM_ARM_VENEER:
17493 {
17494 bfd_vma branch_from_veneer;
17495 unsigned int insn;
17496
17497 /* Take size of veneer into account. */
17498 branch_from_veneer = errnode->u.v.branch->vma
17499 - errnode->vma - 12;
17500
17501 if ((signed) branch_from_veneer < -(1 << 25)
17502 || (signed) branch_from_veneer >= (1 << 25))
17503 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17504 "range"), output_bfd);
17505
17506 /* Original instruction. */
17507 insn = errnode->u.v.branch->u.b.vfp_insn;
17508 contents[endianflip ^ target] = insn & 0xff;
17509 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17510 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17511 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17512
17513 /* Branch back to insn after original insn. */
17514 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17515 contents[endianflip ^ (target + 4)] = insn & 0xff;
17516 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17517 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17518 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17519 }
17520 break;
17521
17522 default:
17523 abort ();
17524 }
17525 }
17526 }
17527
17528 if (arm_data->stm32l4xx_erratumcount != 0)
17529 {
17530 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17531 stm32l4xx_errnode != 0;
17532 stm32l4xx_errnode = stm32l4xx_errnode->next)
17533 {
17534 bfd_vma target = stm32l4xx_errnode->vma - offset;
17535
17536 switch (stm32l4xx_errnode->type)
17537 {
17538 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17539 {
17540 unsigned int insn;
17541 bfd_vma branch_to_veneer =
17542 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17543
17544 if ((signed) branch_to_veneer < -(1 << 24)
17545 || (signed) branch_to_veneer >= (1 << 24))
17546 {
17547 bfd_vma out_of_range =
17548 ((signed) branch_to_veneer < -(1 << 24)) ?
17549 - branch_to_veneer - (1 << 24) :
17550 ((signed) branch_to_veneer >= (1 << 24)) ?
17551 branch_to_veneer - (1 << 24) : 0;
17552
17553 (*_bfd_error_handler)
17554 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17555 "Jump out of range by %ld bytes. "
17556 "Cannot encode branch instruction. "),
17557 output_bfd,
17558 (long) (stm32l4xx_errnode->vma - 4),
17559 out_of_range);
17560 continue;
17561 }
17562
17563 insn = create_instruction_branch_absolute
17564 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17565
17566 /* The instruction is before the label. */
17567 target -= 4;
17568
17569 put_thumb2_insn (globals, output_bfd,
17570 (bfd_vma) insn, contents + target);
17571 }
17572 break;
17573
17574 case STM32L4XX_ERRATUM_VENEER:
17575 {
17576 bfd_byte * veneer;
17577 bfd_byte * veneer_r;
17578 unsigned int insn;
17579
17580 veneer = contents + target;
17581 veneer_r = veneer
17582 + stm32l4xx_errnode->u.b.veneer->vma
17583 - stm32l4xx_errnode->vma - 4;
17584
17585 if ((signed) (veneer_r - veneer -
17586 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17587 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17588 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17589 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17590 || (signed) (veneer_r - veneer) >= (1 << 24))
17591 {
17592 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17593 "veneer."), output_bfd);
17594 continue;
17595 }
17596
17597 /* Original instruction. */
17598 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17599
17600 stm32l4xx_create_replacing_stub
17601 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17602 }
17603 break;
17604
17605 default:
17606 abort ();
17607 }
17608 }
17609 }
17610
17611 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17612 {
17613 arm_unwind_table_edit *edit_node
17614 = arm_data->u.exidx.unwind_edit_list;
17615 /* Now, sec->size is the size of the section we will write. The original
17616 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17617 markers) was sec->rawsize. (This isn't the case if we perform no
17618 edits, then rawsize will be zero and we should use size). */
17619 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17620 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17621 unsigned int in_index, out_index;
17622 bfd_vma add_to_offsets = 0;
17623
17624 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17625 {
17626 if (edit_node)
17627 {
17628 unsigned int edit_index = edit_node->index;
17629
17630 if (in_index < edit_index && in_index * 8 < input_size)
17631 {
17632 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17633 contents + in_index * 8, add_to_offsets);
17634 out_index++;
17635 in_index++;
17636 }
17637 else if (in_index == edit_index
17638 || (in_index * 8 >= input_size
17639 && edit_index == UINT_MAX))
17640 {
17641 switch (edit_node->type)
17642 {
17643 case DELETE_EXIDX_ENTRY:
17644 in_index++;
17645 add_to_offsets += 8;
17646 break;
17647
17648 case INSERT_EXIDX_CANTUNWIND_AT_END:
17649 {
17650 asection *text_sec = edit_node->linked_section;
17651 bfd_vma text_offset = text_sec->output_section->vma
17652 + text_sec->output_offset
17653 + text_sec->size;
17654 bfd_vma exidx_offset = offset + out_index * 8;
17655 unsigned long prel31_offset;
17656
17657 /* Note: this is meant to be equivalent to an
17658 R_ARM_PREL31 relocation. These synthetic
17659 EXIDX_CANTUNWIND markers are not relocated by the
17660 usual BFD method. */
17661 prel31_offset = (text_offset - exidx_offset)
17662 & 0x7ffffffful;
17663 if (bfd_link_relocatable (link_info))
17664 {
17665 /* Here relocation for new EXIDX_CANTUNWIND is
17666 created, so there is no need to
17667 adjust offset by hand. */
17668 prel31_offset = text_sec->output_offset
17669 + text_sec->size;
17670
17671 /* New relocation entity. */
17672 asection *text_out = text_sec->output_section;
17673 Elf_Internal_Rela rel;
17674 rel.r_addend = 0;
17675 rel.r_offset = exidx_offset;
17676 rel.r_info = ELF32_R_INFO (text_out->target_index,
17677 R_ARM_PREL31);
17678
17679 elf32_arm_add_relocation (output_bfd, link_info,
17680 sec->output_section,
17681 &rel);
17682 }
17683
17684 /* First address we can't unwind. */
17685 bfd_put_32 (output_bfd, prel31_offset,
17686 &edited_contents[out_index * 8]);
17687
17688 /* Code for EXIDX_CANTUNWIND. */
17689 bfd_put_32 (output_bfd, 0x1,
17690 &edited_contents[out_index * 8 + 4]);
17691
17692 out_index++;
17693 add_to_offsets -= 8;
17694 }
17695 break;
17696 }
17697
17698 edit_node = edit_node->next;
17699 }
17700 }
17701 else
17702 {
17703 /* No more edits, copy remaining entries verbatim. */
17704 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17705 contents + in_index * 8, add_to_offsets);
17706 out_index++;
17707 in_index++;
17708 }
17709 }
17710
17711 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17712 bfd_set_section_contents (output_bfd, sec->output_section,
17713 edited_contents,
17714 (file_ptr) sec->output_offset, sec->size);
17715
17716 return TRUE;
17717 }
17718
17719 /* Fix code to point to Cortex-A8 erratum stubs. */
17720 if (globals->fix_cortex_a8)
17721 {
17722 struct a8_branch_to_stub_data data;
17723
17724 data.writing_section = sec;
17725 data.contents = contents;
17726
17727 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17728 & data);
17729 }
17730
17731 if (mapcount == 0)
17732 return FALSE;
17733
17734 if (globals->byteswap_code)
17735 {
17736 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17737
17738 ptr = map[0].vma;
17739 for (i = 0; i < mapcount; i++)
17740 {
17741 if (i == mapcount - 1)
17742 end = sec->size;
17743 else
17744 end = map[i + 1].vma;
17745
17746 switch (map[i].type)
17747 {
17748 case 'a':
17749 /* Byte swap code words. */
17750 while (ptr + 3 < end)
17751 {
17752 tmp = contents[ptr];
17753 contents[ptr] = contents[ptr + 3];
17754 contents[ptr + 3] = tmp;
17755 tmp = contents[ptr + 1];
17756 contents[ptr + 1] = contents[ptr + 2];
17757 contents[ptr + 2] = tmp;
17758 ptr += 4;
17759 }
17760 break;
17761
17762 case 't':
17763 /* Byte swap code halfwords. */
17764 while (ptr + 1 < end)
17765 {
17766 tmp = contents[ptr];
17767 contents[ptr] = contents[ptr + 1];
17768 contents[ptr + 1] = tmp;
17769 ptr += 2;
17770 }
17771 break;
17772
17773 case 'd':
17774 /* Leave data alone. */
17775 break;
17776 }
17777 ptr = end;
17778 }
17779 }
17780
17781 free (map);
17782 arm_data->mapcount = -1;
17783 arm_data->mapsize = 0;
17784 arm_data->map = NULL;
17785
17786 return FALSE;
17787 }
17788
17789 /* Mangle thumb function symbols as we read them in. */
17790
17791 static bfd_boolean
17792 elf32_arm_swap_symbol_in (bfd * abfd,
17793 const void *psrc,
17794 const void *pshn,
17795 Elf_Internal_Sym *dst)
17796 {
17797 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17798 return FALSE;
17799 dst->st_target_internal = 0;
17800
17801 /* New EABI objects mark thumb function symbols by setting the low bit of
17802 the address. */
17803 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17804 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17805 {
17806 if (dst->st_value & 1)
17807 {
17808 dst->st_value &= ~(bfd_vma) 1;
17809 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17810 ST_BRANCH_TO_THUMB);
17811 }
17812 else
17813 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17814 }
17815 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17816 {
17817 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17818 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17819 }
17820 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17821 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17822 else
17823 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17824
17825 return TRUE;
17826 }
17827
17828
17829 /* Mangle thumb function symbols as we write them out. */
17830
17831 static void
17832 elf32_arm_swap_symbol_out (bfd *abfd,
17833 const Elf_Internal_Sym *src,
17834 void *cdst,
17835 void *shndx)
17836 {
17837 Elf_Internal_Sym newsym;
17838
17839 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17840 of the address set, as per the new EABI. We do this unconditionally
17841 because objcopy does not set the elf header flags until after
17842 it writes out the symbol table. */
17843 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17844 {
17845 newsym = *src;
17846 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17847 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17848 if (newsym.st_shndx != SHN_UNDEF)
17849 {
17850 /* Do this only for defined symbols. At link type, the static
17851 linker will simulate the work of dynamic linker of resolving
17852 symbols and will carry over the thumbness of found symbols to
17853 the output symbol table. It's not clear how it happens, but
17854 the thumbness of undefined symbols can well be different at
17855 runtime, and writing '1' for them will be confusing for users
17856 and possibly for dynamic linker itself.
17857 */
17858 newsym.st_value |= 1;
17859 }
17860
17861 src = &newsym;
17862 }
17863 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17864 }
17865
17866 /* Add the PT_ARM_EXIDX program header. */
17867
17868 static bfd_boolean
17869 elf32_arm_modify_segment_map (bfd *abfd,
17870 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17871 {
17872 struct elf_segment_map *m;
17873 asection *sec;
17874
17875 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17876 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17877 {
17878 /* If there is already a PT_ARM_EXIDX header, then we do not
17879 want to add another one. This situation arises when running
17880 "strip"; the input binary already has the header. */
17881 m = elf_seg_map (abfd);
17882 while (m && m->p_type != PT_ARM_EXIDX)
17883 m = m->next;
17884 if (!m)
17885 {
17886 m = (struct elf_segment_map *)
17887 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17888 if (m == NULL)
17889 return FALSE;
17890 m->p_type = PT_ARM_EXIDX;
17891 m->count = 1;
17892 m->sections[0] = sec;
17893
17894 m->next = elf_seg_map (abfd);
17895 elf_seg_map (abfd) = m;
17896 }
17897 }
17898
17899 return TRUE;
17900 }
17901
17902 /* We may add a PT_ARM_EXIDX program header. */
17903
17904 static int
17905 elf32_arm_additional_program_headers (bfd *abfd,
17906 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17907 {
17908 asection *sec;
17909
17910 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17911 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17912 return 1;
17913 else
17914 return 0;
17915 }
17916
17917 /* Hook called by the linker routine which adds symbols from an object
17918 file. */
17919
17920 static bfd_boolean
17921 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17922 Elf_Internal_Sym *sym, const char **namep,
17923 flagword *flagsp, asection **secp, bfd_vma *valp)
17924 {
17925 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17926 && (abfd->flags & DYNAMIC) == 0
17927 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17928 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17929
17930 if (elf32_arm_hash_table (info) == NULL)
17931 return FALSE;
17932
17933 if (elf32_arm_hash_table (info)->vxworks_p
17934 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17935 flagsp, secp, valp))
17936 return FALSE;
17937
17938 return TRUE;
17939 }
17940
/* We use this to override swap_symbol_in and swap_symbol_out.  */
const struct elf_size_info elf32_arm_size_info =
{
  /* External (on-disk) structure sizes.  */
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  /* NOTE(review): the positional initialisers below presumably map to
     hash-entry size, internal relocs per external reloc, arch size and
     log2 of file alignment -- verify against struct elf_size_info in
     elf-bfd.h.  */
  4,
  1,
  32, 2,
  ELFCLASS32, EV_CURRENT,
  /* Generic 32-bit helpers, except for the two symbol-swapping entries
     which are the Thumb-aware overrides defined above.  */
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17971
17972 static bfd_vma
17973 read_code32 (const bfd *abfd, const bfd_byte *addr)
17974 {
17975 /* V7 BE8 code is always little endian. */
17976 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17977 return bfd_getl32 (addr);
17978
17979 return bfd_get_32 (abfd, addr);
17980 }
17981
17982 static bfd_vma
17983 read_code16 (const bfd *abfd, const bfd_byte *addr)
17984 {
17985 /* V7 BE8 code is always little endian. */
17986 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17987 return bfd_getl16 (addr);
17988
17989 return bfd_get_16 (abfd, addr);
17990 }
17991
17992 /* Return size of plt0 entry starting at ADDR
17993 or (bfd_vma) -1 if size can not be determined. */
17994
17995 static bfd_vma
17996 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17997 {
17998 bfd_vma first_word;
17999 bfd_vma plt0_size;
18000
18001 first_word = read_code32 (abfd, addr);
18002
18003 if (first_word == elf32_arm_plt0_entry[0])
18004 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
18005 else if (first_word == elf32_thumb2_plt0_entry[0])
18006 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
18007 else
18008 /* We don't yet handle this PLT format. */
18009 return (bfd_vma) -1;
18010
18011 return plt0_size;
18012 }
18013
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* Note: this "else" attaches to whichever "if" chain the preprocessor
     kept above -- do not reorder.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
18053
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Synthesise one "name@plt" symbol per .rel.plt entry so that tools such
   as objdump can label PLT stubs.  Returns the number of symbols created
   (stored via *RET), 0 when there is nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Only executables and shared objects have a meaningful PLT.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really holds dynamic relocations.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  /* Fetch (and cache) the PLT contents so entries can be decoded.  */
  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the size of the symbol array plus all the
     "name@plt" strings appended after it in a single allocation.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header; entries start after it.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in one synthetic symbol per PLT entry.  The
     string area starts right after the symbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  /* Append "+0x<addend>" with leading zeros stripped.  */
	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
18170
18171 static bfd_boolean
18172 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18173 {
18174 if (hdr->sh_flags & SHF_ARM_PURECODE)
18175 *flags |= SEC_ELF_PURECODE;
18176 return TRUE;
18177 }
18178
18179 static flagword
18180 elf32_arm_lookup_section_flags (char *flag_name)
18181 {
18182 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
18183 return SHF_ARM_PURECODE;
18184
18185 return SEC_NO_FLAGS;
18186 }
18187
18188 static unsigned int
18189 elf32_arm_count_additional_relocs (asection *sec)
18190 {
18191 struct _arm_elf_section_data *arm_data;
18192 arm_data = get_arm_elf_section_data (sec);
18193 return arm_data->additional_reloc_count;
18194 }
18195
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I doubles as a found-it sentinel: it stays 0 until a suitable
	   text section header index is located.  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output header of the section the input's sh_link
	       resolved to.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
18286
18287 /* Returns TRUE if NAME is an ARM mapping symbol.
18288 Traditionally the symbols $a, $d and $t have been used.
18289 The ARM ELF standard also defines $x (for A64 code). It also allows a
18290 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
18291 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
18292 not support them here. $t.x indicates the start of ThumbEE instructions. */
18293
18294 static bfd_boolean
18295 is_arm_mapping_symbol (const char * name)
18296 {
18297 return name != NULL /* Paranoia. */
18298 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
18299 the mapping symbols could have acquired a prefix.
18300 We do not support this here, since such symbols no
18301 longer conform to the ARM ELF ABI. */
18302 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
18303 && (name[2] == 0 || name[2] == '.');
18304 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
18305 any characters that follow the period are legal characters for the body
18306 of a symbol's name. For now we just assume that this is the case. */
18307 }
18308
18309 /* Make sure that mapping symbols in object files are not removed via the
18310 "strip --strip-unneeded" tool. These symbols are needed in order to
18311 correctly generate interworking veneers, and for byte swapping code
18312 regions. Once an object file has been linked, it is safe to remove the
18313 symbols as they will no longer be needed. */
18314
18315 static void
18316 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
18317 {
18318 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
18319 && sym->section != bfd_abs_section_ptr
18320 && is_arm_mapping_symbol (sym->name))
18321 sym->flags |= BSF_KEEP;
18322 }
18323
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

/* Core parameters of the standard ARM ELF target.  */
#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		elf32_arm_mkobject

/* Generic BFD entry points implemented by this backend.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab		elf32_arm_get_synthetic_symtab

/* ELF linker/backend hooks.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook		elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Backend capability flags; the default ARM target uses REL relocations.  */
#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_may_use_rel_p	1
#define elf_backend_may_use_rela_p	0
#define elf_backend_default_use_rela_p	0

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build attribute handling.  */
#undef elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#include "elf32-target.h"
18413
/* Native Client targets.  */

/* Override the target vector names for the NaCl flavour of the
   backend; the remaining parameters are inherited from above.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18424
18425 /* Like elf32_arm_link_hash_table_create -- but overrides
18426 appropriately for NaCl. */
18427
18428 static struct bfd_link_hash_table *
18429 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18430 {
18431 struct bfd_link_hash_table *ret;
18432
18433 ret = elf32_arm_link_hash_table_create (abfd);
18434 if (ret)
18435 {
18436 struct elf32_arm_link_hash_table *htab
18437 = (struct elf32_arm_link_hash_table *) ret;
18438
18439 htab->nacl_p = 1;
18440
18441 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18442 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18443 }
18444 return ret;
18445 }
18446
18447 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18448 really need to use elf32_arm_modify_segment_map. But we do it
18449 anyway just to reduce gratuitous differences with the stock ARM backend. */
18450
18451 static bfd_boolean
18452 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18453 {
18454 return (elf32_arm_modify_segment_map (abfd, info)
18455 && nacl_modify_segment_map (abfd, info));
18456 }
18457
/* Final-write hook for the NaCl target: run the standard ARM
   processing first, then layer the NaCl-specific fixups on top.
   The order of the two calls is significant.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18464
18465 static bfd_vma
18466 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18467 const arelent *rel ATTRIBUTE_UNUSED)
18468 {
18469 return plt->vma
18470 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18471 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18472 }
18473
/* Instantiate the NaCl flavour of the backend.  */
#undef elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment		4
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef elf_backend_modify_program_headers
#define elf_backend_modify_program_headers	nacl_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef elf_backend_copy_special_section_fields

#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults.  */
#undef elf_backend_plt_alignment
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000
18509
18510
/* VxWorks Targets.  */

/* Override the target vector names for the VxWorks flavour of the
   backend.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
18521
18522 /* Like elf32_arm_link_hash_table_create -- but overrides
18523 appropriately for VxWorks. */
18524
18525 static struct bfd_link_hash_table *
18526 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18527 {
18528 struct bfd_link_hash_table *ret;
18529
18530 ret = elf32_arm_link_hash_table_create (abfd);
18531 if (ret)
18532 {
18533 struct elf32_arm_link_hash_table *htab
18534 = (struct elf32_arm_link_hash_table *) ret;
18535 htab->use_rel = 0;
18536 htab->vxworks_p = 1;
18537 }
18538 return ret;
18539 }
18540
/* Final-write hook for the VxWorks target: run the standard ARM
   processing first, then the VxWorks-specific fixups.  The order of
   the two calls is significant.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18547
/* Instantiate the VxWorks flavour of the backend.  */
#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs	elf_vxworks_emit_relocs

/* VxWorks uses RELA relocations exclusively.  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
18570
18571
/* Merge backend specific data from an object file to the output
   object file when linking.  IBFD is the input BFD, OBFD the output
   being built.  Returns FALSE (after reporting an error) when the
   files use irreconcilable ABI variants, TRUE otherwise.  Note that
   some mismatches only produce warnings and still return TRUE.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Nothing to merge unless both files are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attributes first; this can fail on its own.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      /* NOTE(review): the break below means only the FIRST
		 non-glue section is examined, so ONLY_DATA_SECTIONS
		 reflects that single section — confirm this is the
		 intended heuristic.  */
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 program counter width.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float argument passing convention.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP vs FPA floating point instruction set.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* Cirrus Maverick co-processor usage.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
18795
18796
/* Symbian OS Targets.  */

/* Override the target vector names for the Symbian OS (BPABI) flavour
   of the backend.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
18807
18808 /* Like elf32_arm_link_hash_table_create -- but overrides
18809 appropriately for Symbian OS. */
18810
18811 static struct bfd_link_hash_table *
18812 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18813 {
18814 struct bfd_link_hash_table *ret;
18815
18816 ret = elf32_arm_link_hash_table_create (abfd);
18817 if (ret)
18818 {
18819 struct elf32_arm_link_hash_table *htab
18820 = (struct elf32_arm_link_hash_table *)ret;
18821 /* There is no PLT header for Symbian OS. */
18822 htab->plt_header_size = 0;
18823 /* The PLT entries are each one instruction and one word. */
18824 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18825 htab->symbian_p = 1;
18826 /* Symbian uses armv5t or above, so use_blx is always true. */
18827 htab->use_blx = 1;
18828 htab->root.is_relocatable_executable = 1;
18829 }
18830 return ret;
18831 }
18832
/* Section pre-sets for the Symbian OS (BPABI) target; overrides the
   generic ELF defaults via elf_backend_special_sections below.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel.  */
  { NULL,                             0, 0, 0,                 0 }
};
18853
/* Begin-write hook for Symbian OS: drop D_PAGED before deferring to
   the generic ARM hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18869
18870 static bfd_boolean
18871 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18872 struct bfd_link_info *info)
18873 {
18874 struct elf_segment_map *m;
18875 asection *dynsec;
18876
18877 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18878 segment. However, because the .dynamic section is not marked
18879 with SEC_LOAD, the generic ELF code will not create such a
18880 segment. */
18881 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18882 if (dynsec)
18883 {
18884 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18885 if (m->p_type == PT_DYNAMIC)
18886 break;
18887
18888 if (m == NULL)
18889 {
18890 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18891 m->next = elf_seg_map (abfd);
18892 elf_seg_map (abfd) = m;
18893 }
18894 }
18895
18896 /* Also call the generic arm routine. */
18897 return elf32_arm_modify_segment_map (abfd, info);
18898 }
18899
18900 /* Return address for Ith PLT stub in section PLT, for relocation REL
18901 or (bfd_vma) -1 if it should not be included. */
18902
18903 static bfd_vma
18904 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18905 const arelent *rel ATTRIBUTE_UNUSED)
18906 {
18907 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18908 }
18909
/* Instantiate the Symbian OS flavour of the backend.  */
#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val	elf32_arm_symbian_plt_sym_val

/* Symbian uses REL relocations, like the default ARM target.  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"