c21d45a4fbc5e96dd392f0cdbce3e59d7a88aa8c
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the name of the relocation section associated with section
34 NAME: ".rel" NAME when HTAB->use_rel is set, ".rela" NAME otherwise.
   NOTE(review): HTAB is the bfd's elf32_arm_link_hash_table (the struct
   carrying use_rel); the original comment's "hash_entry" looks like a
   typo — confirm against the struct definition.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return the size in bytes of one external relocation entry:
39 Elf32_External_Rel when HTAB->use_rel is set, Elf32_External_Rela
   otherwise.  NOTE(review): HTAB is the bfd's elf32_arm_link_hash_table;
   the original "hash_entry" wording appears to be a typo.  */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return the function used to swap a relocation from external to
46 internal form, selected by HTAB->use_rel: the REL variant when set,
   the RELA variant otherwise.  NOTE(review): HTAB is the bfd's
   elf32_arm_link_hash_table, not a hash entry.  */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return the function used to swap a relocation from internal to
53 external form, selected by HTAB->use_rel: the REL variant when set,
   the RELA variant otherwise.  NOTE(review): HTAB is the bfd's
   elf32_arm_link_hash_table, not a hash entry.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
58
/* ARM uses REL-style relocations by default: the RELA-style
   info_to_howto hook is defined to 0 (none), and REL-style lookups are
   routed to elf32_arm_info_to_howto.  */
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
/* ABI version number and OS/ABI identification recorded in the ELF
   header for ARM output.  */
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF: X with its two low bits
66 cleared, i.e. X rounded down to a 4-byte (word) boundary.  */
67 #define Pa(X) ((X) & 0xfffffffc)
68
/* Forward declaration.  Writes SEC's CONTENTS into OUTPUT_BFD for
   LINK_INFO.  NOTE(review): the function body is outside this chunk —
   see the definition for the exact section-editing behavior.  */
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
/* 160 onwards:

   R_ARM_IRELATIVE is not contiguous with the relocations covered by
   elf32_arm_howto_table_1, so it gets a one-element table of its own;
   elf32_arm_howto_from_type maps the type number here explicitly.  */
static reloc_howto_type elf32_arm_howto_table_2[1] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1765
/* 249-255 extended, currently unused, relocations:

   These four entries (R_ARM_RREL32 .. R_ARM_RBASE) are indexed from
   R_ARM_RREL32 by elf32_arm_howto_from_type, so their order here must
   match the numeric order of the relocation type values.  All are
   zero-sized no-op howtos.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1825
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1831
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838
1839 return NULL;
1840 }
1841
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1845 {
1846 unsigned int r_type;
1847
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851
/* One entry in the table mapping generic BFD relocation codes onto
   ARM ELF relocation type numbers.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD reloc code.  */
  unsigned char elf_reloc_val;			/* Matching R_ARM_* value.  */
};
1857
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860 {
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951 };
1952
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1956 {
1957 unsigned int i;
1958
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962
1963 return NULL;
1964 }
1965
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 const char *r_name)
1969 {
1970 unsigned int i;
1971
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1976
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1981
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1986
1987 return NULL;
1988 }
1989
1990 /* Support for core dump NOTE sections. */
1991
/* Handle an NT_PRSTATUS core-dump note: record the signal number and
   LWP id in ABFD's core tdata and create a ".reg/<pid>" pseudosection
   covering the general-purpose registers.  Only the 148-byte
   Linux/ARM 32-bit elf_prstatus layout is recognised; any other size
   is rejected so an unknown layout is never misparsed.  */

static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg */
      offset = 72;
      size = 72;	/* 18 registers * 4 bytes.  */

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2021
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025 switch (note->descsz)
2026 {
2027 default:
2028 return FALSE;
2029
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037 }
2038
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2042 {
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2045
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2048 }
2049
2050 return TRUE;
2051 }
2052
/* Write an ARM core-dump note of type NOTE_TYPE into BUF.  The
   variable arguments supply, for NT_PRPSINFO, the fname and psargs
   strings and, for NT_PRSTATUS, the pid (long), signal number (int)
   and a 72-byte general-purpose register block.  The fixed offsets
   below mirror the 124-byte Linux/ARM elf_prpsinfo and 148-byte
   elf_prstatus layouts assumed by the grok functions above.  Returns
   the updated note buffer, or NULL for an unhandled note type.  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124];
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* strncpy is deliberate here: pr_fname (16 bytes) and
	   pr_psargs (80 bytes) are fixed-width fields which need not
	   be NUL-terminated when the string fills the field, and the
	   memset above has already zero-padded the buffer.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
	strncpy (data + 44, va_arg (ap, const char *), 80);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2100
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2105
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2109
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2112
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2114 interworkable. */
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2119
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2126
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2129
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2132
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2135
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2138
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2140
2141 /* The name of the dynamic interpreter. This is put in the .interp
2142 section. */
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2144
/* Trampoline used for TLS calls: r0 is expected to hold an offset from
   lr, and the target address is loaded from the second word of the
   structure r0 then points at.  NOTE(review): the exact calling
   convention is inferred from the code below — confirm against the
   TLS descriptor ABI before relying on it.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx r1 */
};
2151
/* Lazy TLS-descriptor trampoline: the two trailing literal words (3:
   and 4:) hold GOT-relative displacements which are applied
   PC-relatively at 1: and 2: to fetch the lazy resolver's address from
   the GOT and to compute the GOT address itself, before jumping to the
   resolver via r2.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT) */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2164
2165 #ifdef FOUR_WORD_PLT
2166
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2170 linker first. */
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2172 {
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2177 };
2178
2179 /* Subsequent entries in a procedure linkage table look like
2180 this. */
2181 static const bfd_vma elf32_arm_plt_entry [] =
2182 {
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2187 };
2188
2189 #else /* not FOUR_WORD_PLT */
2190
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2194 linker first. */
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2196 {
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2202 };
2203
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2207 {
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2211 };
2212
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2216 {
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2221 };
2222
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2224
2225 #endif /* not FOUR_WORD_PLT */
2226
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded to one or two array elements.  */
  0xf8dfb500,		/* push {lr} */
  0x44fee008,		/* ldr.w lr, [pc, #8] */
			/* add lr, pc */
  0xff08f85e,		/* ldr.w pc, [lr, #8]! */
  0x00000000,		/* &GOT[0] - . */
};
2240
/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
{
  /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded to one or two array elements.  */
  0x0c00f240,		/* movw ip, #0xNNNN */
  0x0c00f2c0,		/* movt ip, #0xNNNN */
  0xf8dc44fc,		/* add ip, pc */
  0xbf00f000		/* ldr.w pc, [ip] */
			/* nop */
};
2253
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2257 {
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2262 };
2263
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2266 {
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2273 };
2274
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2277 {
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2284 };
2285
/* An initial stub used if the PLT entry is referenced from Thumb code.
   The "bx pc" switches to ARM state and execution continues at the
   word-aligned ARM code following the stub; the nop is padding.
   NOTE(review): this relies on the ARM PLT entry being laid out
   immediately after the stub — confirm in the PLT sizing code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
{
  0x4778,		/* bx pc */
  0x46c0		/* nop */
};
2293
2294 /* The entries in a PLT when using a DLL-based target with multiple
2295 address spaces. */
2296 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2297 {
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2300 };
2301
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2305 linker first. */
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2307 {
2308 /* First bundle: */
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2318 /* Third bundle: */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2322 /* .Lplt_tail: */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2329 };
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2331
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2334 {
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2339 };
2340
/* Maximum reach, in bytes, of the branch instructions used by the stub
   code below, measured from the branch instruction's own address.  The
   +4/+8 terms account for the PC reading ahead of the executing
   instruction (4 bytes in Thumb state, 8 in ARM state); the shifts and
   -2 terms reflect the offset field widths and granularity of the
   respective encodings (ARM B/BL, Thumb-1 BL, Thumb-2 B.W/BL, and
   Thumb-2 conditional B.W).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2349
2350 enum stub_insn_type
2351 {
2352 THUMB16_TYPE = 1,
2353 THUMB32_TYPE,
2354 ARM_TYPE,
2355 DATA_TYPE
2356 };
2357
2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2360 is inserted in arm_build_one_stub(). */
2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2364 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2365 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2366 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2367
2368 typedef struct
2369 {
2370 bfd_vma data;
2371 enum stub_insn_type type;
2372 unsigned int r_type;
2373 int reloc_addend;
2374 } insn_sequence;
2375
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2379 {
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2382 };
2383
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2385 available. */
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2387 {
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2391 };
2392
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2395 {
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2403 };
2404
2405 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2406 allowed. */
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2408 {
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2412 ARM_INSN (0xe12fff1c), /* bx ip */
2413 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2414 };
2415
2416 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2417 available. */
2418 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2419 {
2420 THUMB16_INSN (0x4778), /* bx pc */
2421 THUMB16_INSN (0x46c0), /* nop */
2422 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2423 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2424 };
2425
2426 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2427 one, when the destination is close enough. */
2428 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2429 {
2430 THUMB16_INSN (0x4778), /* bx pc */
2431 THUMB16_INSN (0x46c0), /* nop */
2432 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2433 };
2434
2435 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2436 blx to reach the stub if necessary. */
2437 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2438 {
2439 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2440 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2441 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2442 };
2443
2444 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2445 blx to reach the stub if necessary. We can not add into pc;
2446 it is not guaranteed to mode switch (different in ARMv6 and
2447 ARMv7). */
2448 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2449 {
2450 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2451 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2452 ARM_INSN (0xe12fff1c), /* bx ip */
2453 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2454 };
2455
2456 /* V4T ARM -> ARM long branch stub, PIC. */
2457 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2458 {
2459 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2460 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2461 ARM_INSN (0xe12fff1c), /* bx ip */
2462 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2463 };
2464
/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2474
2475 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2476 architectures. */
2477 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2478 {
2479 THUMB16_INSN (0xb401), /* push {r0} */
2480 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2481 THUMB16_INSN (0x46fc), /* mov ip, pc */
2482 THUMB16_INSN (0x4484), /* add ip, r0 */
2483 THUMB16_INSN (0xbc01), /* pop {r0} */
2484 THUMB16_INSN (0x4760), /* bx ip */
2485 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2486 };
2487
2488 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2489 allowed. */
2490 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2491 {
2492 THUMB16_INSN (0x4778), /* bx pc */
2493 THUMB16_INSN (0x46c0), /* nop */
2494 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2495 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2496 ARM_INSN (0xe12fff1c), /* bx ip */
2497 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2498 };
2499
2500 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2501 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2502 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2503 {
2504 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2505 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2506 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2507 };
2508
/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2519
2520 /* NaCl ARM -> ARM long branch stub. */
2521 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2522 {
2523 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2524 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2525 ARM_INSN (0xe12fff1c), /* bx ip */
2526 ARM_INSN (0xe320f000), /* nop */
2527 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2528 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2529 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2530 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2531 };
2532
2533 /* NaCl ARM -> ARM long branch stub, PIC. */
2534 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2535 {
2536 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2537 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2538 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2539 ARM_INSN (0xe12fff1c), /* bx ip */
2540 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2541 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2542 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2543 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2544 };
2545
2546
2547 /* Cortex-A8 erratum-workaround stubs. */
2548
2549 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2550 can't use a conditional branch to reach this stub). */
2551
2552 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2553 {
2554 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2555 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2556 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2557 };
2558
2559 /* Stub used for b.w and bl.w instructions. */
2560
2561 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2562 {
2563 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2564 };
2565
2566 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2567 {
2568 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2569 };
2570
2571 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2572 instruction (which switches to ARM mode) to point to this stub. Jump to the
2573 real destination using an ARM-mode branch. */
2574
2575 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2576 {
2577 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2578 };
2579
2580 /* For each section group there can be a specially created linker section
2581 to hold the stubs for that group. The name of the stub section is based
2582 upon the name of another section within that group with the suffix below
2583 applied.
2584
2585 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2586 create what appeared to be a linker stub section when it actually
2587 contained user code/data. For example, consider this fragment:
2588
2589 const char * stubborn_problems[] = { "np" };
2590
2591 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2592 section called:
2593
2594 .data.rel.local.stubborn_problems
2595
   This then causes problems in elf32_arm_build_stubs() as it triggers:
2597
2598 // Ignore non-stub sections.
2599 if (!strstr (stub_sec->name, STUB_SUFFIX))
2600 continue;
2601
2602 And so the section would be ignored instead of being processed. Hence
2603 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2604 C identifier. */
2605 #define STUB_SUFFIX ".__stub"
2606
2607 /* One entry per long/short branch stub defined above. */
2608 #define DEF_STUBS \
2609 DEF_STUB(long_branch_any_any) \
2610 DEF_STUB(long_branch_v4t_arm_thumb) \
2611 DEF_STUB(long_branch_thumb_only) \
2612 DEF_STUB(long_branch_v4t_thumb_thumb) \
2613 DEF_STUB(long_branch_v4t_thumb_arm) \
2614 DEF_STUB(short_branch_v4t_thumb_arm) \
2615 DEF_STUB(long_branch_any_arm_pic) \
2616 DEF_STUB(long_branch_any_thumb_pic) \
2617 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2618 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2619 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2620 DEF_STUB(long_branch_thumb_only_pic) \
2621 DEF_STUB(long_branch_any_tls_pic) \
2622 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2623 DEF_STUB(long_branch_arm_nacl) \
2624 DEF_STUB(long_branch_arm_nacl_pic) \
2625 DEF_STUB(a8_veneer_b_cond) \
2626 DEF_STUB(a8_veneer_b) \
2627 DEF_STUB(a8_veneer_bl) \
2628 DEF_STUB(a8_veneer_blx)
2629
2630 #define DEF_STUB(x) arm_stub_##x,
2631 enum elf32_arm_stub_type
2632 {
2633 arm_stub_none,
2634 DEF_STUBS
2635 max_stub_type
2636 };
2637 #undef DEF_STUB
2638
2639 /* Note the first a8_veneer type. */
2640 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2641
2642 typedef struct
2643 {
2644 const insn_sequence* template_sequence;
2645 int template_size;
2646 } stub_def;
2647
2648 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2649 static const stub_def stub_definitions[] =
2650 {
2651 {NULL, 0},
2652 DEF_STUBS
2653 };
2654
2655 struct elf32_arm_stub_hash_entry
2656 {
2657 /* Base hash table entry structure. */
2658 struct bfd_hash_entry root;
2659
2660 /* The stub section. */
2661 asection *stub_sec;
2662
2663 /* Offset within stub_sec of the beginning of this stub. */
2664 bfd_vma stub_offset;
2665
2666 /* Given the symbol's value and its section we can determine its final
2667 value when building the stubs (so the stub knows where to jump). */
2668 bfd_vma target_value;
2669 asection *target_section;
2670
2671 /* Same as above but for the source of the branch to the stub. Used for
2672 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2673 such, source section does not need to be recorded since Cortex-A8 erratum
2674 workaround stubs are only generated when both source and target are in the
2675 same section. */
2676 bfd_vma source_value;
2677
2678 /* The instruction which caused this stub to be generated (only valid for
2679 Cortex-A8 erratum workaround stubs at present). */
2680 unsigned long orig_insn;
2681
2682 /* The stub type. */
2683 enum elf32_arm_stub_type stub_type;
2684 /* Its encoding size in bytes. */
2685 int stub_size;
2686 /* Its template. */
2687 const insn_sequence *stub_template;
2688 /* The size of the template (number of entries). */
2689 int stub_template_size;
2690
2691 /* The symbol table entry, if any, that this was derived from. */
2692 struct elf32_arm_link_hash_entry *h;
2693
2694 /* Type of branch. */
2695 enum arm_st_branch_type branch_type;
2696
2697 /* Where this stub is being called from, or, in the case of combined
2698 stub sections, the first input section in the group. */
2699 asection *id_sec;
2700
2701 /* The name for the local symbol at the start of this stub. The
2702 stub name in the hash table has to be unique; this does not, so
2703 it can be friendlier. */
2704 char *output_name;
2705 };
2706
2707 /* Used to build a map of a section. This is required for mixed-endian
2708 code/data. */
2709
2710 typedef struct elf32_elf_section_map
2711 {
2712 bfd_vma vma;
2713 char type;
2714 }
2715 elf32_arm_section_map;
2716
2717 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2718
2719 typedef enum
2720 {
2721 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2722 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2723 VFP11_ERRATUM_ARM_VENEER,
2724 VFP11_ERRATUM_THUMB_VENEER
2725 }
2726 elf32_vfp11_erratum_type;
2727
2728 typedef struct elf32_vfp11_erratum_list
2729 {
2730 struct elf32_vfp11_erratum_list *next;
2731 bfd_vma vma;
2732 union
2733 {
2734 struct
2735 {
2736 struct elf32_vfp11_erratum_list *veneer;
2737 unsigned int vfp_insn;
2738 } b;
2739 struct
2740 {
2741 struct elf32_vfp11_erratum_list *branch;
2742 unsigned int id;
2743 } v;
2744 } u;
2745 elf32_vfp11_erratum_type type;
2746 }
2747 elf32_vfp11_erratum_list;
2748
2749 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2750 veneer. */
2751 typedef enum
2752 {
2753 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2754 STM32L4XX_ERRATUM_VENEER
2755 }
2756 elf32_stm32l4xx_erratum_type;
2757
2758 typedef struct elf32_stm32l4xx_erratum_list
2759 {
2760 struct elf32_stm32l4xx_erratum_list *next;
2761 bfd_vma vma;
2762 union
2763 {
2764 struct
2765 {
2766 struct elf32_stm32l4xx_erratum_list *veneer;
2767 unsigned int insn;
2768 } b;
2769 struct
2770 {
2771 struct elf32_stm32l4xx_erratum_list *branch;
2772 unsigned int id;
2773 } v;
2774 } u;
2775 elf32_stm32l4xx_erratum_type type;
2776 }
2777 elf32_stm32l4xx_erratum_list;
2778
2779 typedef enum
2780 {
2781 DELETE_EXIDX_ENTRY,
2782 INSERT_EXIDX_CANTUNWIND_AT_END
2783 }
2784 arm_unwind_edit_type;
2785
2786 /* A (sorted) list of edits to apply to an unwind table. */
2787 typedef struct arm_unwind_table_edit
2788 {
2789 arm_unwind_edit_type type;
2790 /* Note: we sometimes want to insert an unwind entry corresponding to a
2791 section different from the one we're currently writing out, so record the
2792 (text) section this edit relates to here. */
2793 asection *linked_section;
2794 unsigned int index;
2795 struct arm_unwind_table_edit *next;
2796 }
2797 arm_unwind_table_edit;
2798
2799 typedef struct _arm_elf_section_data
2800 {
2801 /* Information about mapping symbols. */
2802 struct bfd_elf_section_data elf;
2803 unsigned int mapcount;
2804 unsigned int mapsize;
2805 elf32_arm_section_map *map;
2806 /* Information about CPU errata. */
2807 unsigned int erratumcount;
2808 elf32_vfp11_erratum_list *erratumlist;
2809 unsigned int stm32l4xx_erratumcount;
2810 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2811 unsigned int additional_reloc_count;
2812 /* Information about unwind tables. */
2813 union
2814 {
2815 /* Unwind info attached to a text section. */
2816 struct
2817 {
2818 asection *arm_exidx_sec;
2819 } text;
2820
2821 /* Unwind info attached to an .ARM.exidx section. */
2822 struct
2823 {
2824 arm_unwind_table_edit *unwind_edit_list;
2825 arm_unwind_table_edit *unwind_edit_tail;
2826 } exidx;
2827 } u;
2828 }
2829 _arm_elf_section_data;
2830
2831 #define elf32_arm_section_data(sec) \
2832 ((_arm_elf_section_data *) elf_section_data (sec))
2833
2834 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2835 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2836 so may be created multiple times: we use an array of these entries whilst
2837 relaxing which we can refresh easily, then create stubs for each potentially
2838 erratum-triggering instruction once we've settled on a solution. */
2839
2840 struct a8_erratum_fix
2841 {
2842 bfd *input_bfd;
2843 asection *section;
2844 bfd_vma offset;
2845 bfd_vma target_offset;
2846 unsigned long orig_insn;
2847 char *stub_name;
2848 enum elf32_arm_stub_type stub_type;
2849 enum arm_st_branch_type branch_type;
2850 };
2851
2852 /* A table of relocs applied to branches which might trigger Cortex-A8
2853 erratum. */
2854
/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Address of the branch and of its destination.  */
  bfd_vma from;
  bfd_vma destination;
  /* Hash entry and name of the branch target.  NOTE(review): HASH is
     presumably NULL for local symbols — confirm at the fill sites.  */
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  /* ELF relocation type of the branch.  */
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* Whether a non-Cortex-A8 stub already covers this branch.  */
  bfd_boolean non_a8_stub;
};
2865
2866 /* The size of the thread control block. */
2867 #define TCB_SIZE 8
2868
2869 /* ARM-specific information about a PLT entry, over and above the usual
2870 gotplt_union. */
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  Initialised to -1 (see
     elf32_arm_link_hash_newfunc) while unassigned.  */
  bfd_signed_vma got_offset;
};
2893
2894 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.
   Local symbols have no elf_link_hash_entry, so this structure mirrors
   the PLT-related parts of one.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};
2908
/* ARM-specific per-object (per input BFD) data; accessed through the
   elf_arm_tdata and elf32_arm_local_* macros below.  */
struct elf_arm_obj_tdata
{
  /* The generic per-object data must come first.  */
  struct elf_obj_tdata root;

  /* tls_type for each local got entry (array of GOT_* values, one per
     local symbol).  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};
2928
2929 #define elf_arm_tdata(bfd) \
2930 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2931
2932 #define elf32_arm_local_got_tls_type(bfd) \
2933 (elf_arm_tdata (bfd)->local_got_tls_type)
2934
2935 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2936 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2937
2938 #define elf32_arm_local_iplt(bfd) \
2939 (elf_arm_tdata (bfd)->local_iplt)
2940
2941 #define is_arm_elf(bfd) \
2942 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2943 && elf_tdata (bfd) != NULL \
2944 && elf_object_id (bfd) == ARM_ELF_DATA)
2945
/* Allocate the ARM-specific object data (struct elf_arm_obj_tdata) for
   ABFD, tagged with ARM_ELF_DATA.  Returns FALSE on allocation
   failure.  */

static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
2952
2953 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2954
2955 /* Arm ELF linker hash entry. */
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  /* The generic entry must come first.  */
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  /* Bit-mask of the GOT_* values above describing how this symbol's GOT
     entry (if any) is used.  */
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  (bfd_vma) -1 while
     unassigned (see elf32_arm_link_hash_newfunc).  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;
};
2991
2992 /* Traverse an arm ELF linker hash table. */
2993 #define elf32_arm_link_hash_traverse(table, func, info) \
2994 (elf_link_hash_traverse \
2995 (&(table)->root, \
2996 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2997 (info)))
2998
2999 /* Get the ARM elf linker hash table from a link_info structure. */
3000 #define elf32_arm_hash_table(info) \
3001 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3002 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3003
3004 #define arm_stub_hash_lookup(table, string, create, copy) \
3005 ((struct elf32_arm_stub_hash_entry *) \
3006 bfd_hash_lookup ((table), (string), (create), (copy)))
3007
3008 /* Array to keep track of which stub sections have been created, and
3009 information on stub grouping. */
/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  (See the stub_group field of
   struct elf32_arm_link_hash_table.)  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};
3018
3019 #define elf32_arm_compute_jump_table_size(htab) \
3020 ((htab)->next_tls_desc_index * 4)
3021
3022 /* ARM ELF linker hash table. */
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  int use_rel;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* Short-cuts to get to dynamic linker sections.  */
  asection *sdynbss;
  asection *srelbss;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32 relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;
};
3183
/* Return the number of trailing zero bits in MASK.  Uses the compiler
   builtin when available; the portable fallback returns the bit width
   of MASK when MASK is zero.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int count = 0;

  while (count < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3201
/* Return the number of set bits in MASK.  Uses the compiler builtin
   when available, otherwise a portable bit-by-bit count.  */
static inline int
popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  unsigned int bit, total = 0;

  for (bit = 0; bit < 8 * sizeof (mask); bit++, mask >>= 1)
    total += mask & 0x1;
  return total;
#endif
}
3219
3220 /* Create an entry in an ARM ELF linker hash table. */
3221
3222 static struct bfd_hash_entry *
3223 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3224 struct bfd_hash_table * table,
3225 const char * string)
3226 {
3227 struct elf32_arm_link_hash_entry * ret =
3228 (struct elf32_arm_link_hash_entry *) entry;
3229
3230 /* Allocate the structure if it has not already been allocated by a
3231 subclass. */
3232 if (ret == NULL)
3233 ret = (struct elf32_arm_link_hash_entry *)
3234 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3235 if (ret == NULL)
3236 return (struct bfd_hash_entry *) ret;
3237
3238 /* Call the allocation method of the superclass. */
3239 ret = ((struct elf32_arm_link_hash_entry *)
3240 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3241 table, string));
3242 if (ret != NULL)
3243 {
3244 ret->dyn_relocs = NULL;
3245 ret->tls_type = GOT_UNKNOWN;
3246 ret->tlsdesc_got = (bfd_vma) -1;
3247 ret->plt.thumb_refcount = 0;
3248 ret->plt.maybe_thumb_refcount = 0;
3249 ret->plt.noncall_refcount = 0;
3250 ret->plt.got_offset = -1;
3251 ret->is_iplt = FALSE;
3252 ret->export_glue = NULL;
3253
3254 ret->stub_cache = NULL;
3255 }
3256
3257 return (struct bfd_hash_entry *) ret;
3258 }
3259
3260 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3261 symbols. */
3262
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  All four per-symbol arrays are carved out of a single zeroed
   allocation, with elf_local_got_refcounts doubling as the "already
   allocated" marker.  Returns FALSE only on allocation failure.  */

static bfd_boolean
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;
      bfd_size_type size;
      char *data;

      /* sh_info of the symbol table gives the number of local symbols.  */
      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
      size = num_syms * (sizeof (bfd_signed_vma)
			 + sizeof (struct arm_local_iplt_info *)
			 + sizeof (bfd_vma)
			 + sizeof (char));
      data = bfd_zalloc (abfd, size);
      if (data == NULL)
	return FALSE;

      /* Slice the block: GOT refcounts, then .iplt info pointers, then
	 TLS descriptor GOT offsets, then one tls_type byte per symbol.  */
      elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
      data += num_syms * sizeof (bfd_signed_vma);

      elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
      data += num_syms * sizeof (struct arm_local_iplt_info *);

      elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
      data += num_syms * sizeof (bfd_vma);

      elf32_arm_local_got_tls_type (abfd) = data;
    }
  return TRUE;
}
3294
3295 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3296 to input bfd ABFD. Create the information if it doesn't already exist.
3297 Return null if an allocation fails. */
3298
3299 static struct arm_local_iplt_info *
3300 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3301 {
3302 struct arm_local_iplt_info **ptr;
3303
3304 if (!elf32_arm_allocate_local_sym_info (abfd))
3305 return NULL;
3306
3307 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3308 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3309 if (*ptr == NULL)
3310 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3311 return *ptr;
3312 }
3313
3314 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3315 in ABFD's symbol table. If the symbol is global, H points to its
3316 hash table entry, otherwise H is null.
3317
3318 Return true if the symbol does have PLT information. When returning
3319 true, point *ROOT_PLT at the target-independent reference count/offset
3320 union and *ARM_PLT at the ARM-specific information. */
3321
3322 static bfd_boolean
3323 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3324 unsigned long r_symndx, union gotplt_union **root_plt,
3325 struct arm_plt_info **arm_plt)
3326 {
3327 struct arm_local_iplt_info *local_iplt;
3328
3329 if (h != NULL)
3330 {
3331 *root_plt = &h->root.plt;
3332 *arm_plt = &h->plt;
3333 return TRUE;
3334 }
3335
3336 if (elf32_arm_local_iplt (abfd) == NULL)
3337 return FALSE;
3338
3339 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3340 if (local_iplt == NULL)
3341 return FALSE;
3342
3343 *root_plt = &local_iplt->root;
3344 *arm_plt = &local_iplt->arm;
3345 return TRUE;
3346 }
3347
3348 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3349 before it. */
3350
3351 static bfd_boolean
3352 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3353 struct arm_plt_info *arm_plt)
3354 {
3355 struct elf32_arm_link_hash_table *htab;
3356
3357 htab = elf32_arm_hash_table (info);
3358 return (arm_plt->thumb_refcount != 0
3359 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3360 }
3361
3362 /* Return a pointer to the head of the dynamic reloc list that should
3363 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3364 ABFD's symbol table. Return null if an error occurs. */
3365
3366 static struct elf_dyn_relocs **
3367 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3368 Elf_Internal_Sym *isym)
3369 {
3370 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3371 {
3372 struct arm_local_iplt_info *local_iplt;
3373
3374 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3375 if (local_iplt == NULL)
3376 return NULL;
3377 return &local_iplt->dyn_relocs;
3378 }
3379 else
3380 {
3381 /* Track dynamic relocs needed for local syms too.
3382 We really need local syms available to do this
3383 easily. Oh well. */
3384 asection *s;
3385 void *vpp;
3386
3387 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3388 if (s == NULL)
3389 abort ();
3390
3391 vpp = &elf_section_data (s)->local_dynrel;
3392 return (struct elf_dyn_relocs **) vpp;
3393 }
3394 }
3395
3396 /* Initialize an entry in the stub hash table. */
3397
3398 static struct bfd_hash_entry *
3399 stub_hash_newfunc (struct bfd_hash_entry *entry,
3400 struct bfd_hash_table *table,
3401 const char *string)
3402 {
3403 /* Allocate the structure if it has not already been allocated by a
3404 subclass. */
3405 if (entry == NULL)
3406 {
3407 entry = (struct bfd_hash_entry *)
3408 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3409 if (entry == NULL)
3410 return entry;
3411 }
3412
3413 /* Call the allocation method of the superclass. */
3414 entry = bfd_hash_newfunc (entry, table, string);
3415 if (entry != NULL)
3416 {
3417 struct elf32_arm_stub_hash_entry *eh;
3418
3419 /* Initialize the local fields. */
3420 eh = (struct elf32_arm_stub_hash_entry *) entry;
3421 eh->stub_sec = NULL;
3422 eh->stub_offset = 0;
3423 eh->source_value = 0;
3424 eh->target_value = 0;
3425 eh->target_section = NULL;
3426 eh->orig_insn = 0;
3427 eh->stub_type = arm_stub_none;
3428 eh->stub_size = 0;
3429 eh->stub_template = NULL;
3430 eh->stub_template_size = 0;
3431 eh->h = NULL;
3432 eh->id_sec = NULL;
3433 eh->output_name = NULL;
3434 }
3435
3436 return entry;
3437 }
3438
3439 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3440 shortcuts to them in our hash table. */
3441
3442 static bfd_boolean
3443 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3444 {
3445 struct elf32_arm_link_hash_table *htab;
3446
3447 htab = elf32_arm_hash_table (info);
3448 if (htab == NULL)
3449 return FALSE;
3450
3451 /* BPABI objects never have a GOT, or associated sections. */
3452 if (htab->symbian_p)
3453 return TRUE;
3454
3455 if (! _bfd_elf_create_got_section (dynobj, info))
3456 return FALSE;
3457
3458 return TRUE;
3459 }
3460
3461 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3462
/* Create the .iplt, .rel(a).iplt and .igot.plt sections.  Returns FALSE
   if any section cannot be created or aligned.  */

static bfd_boolean
create_ifunc_sections (struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;
  const struct elf_backend_data *bed;
  bfd *dynobj;
  asection *s;
  flagword flags;

  /* NOTE(review): unlike most routines here, htab is not checked for
     NULL — presumably callers guarantee an ARM hash table; confirm.  */
  htab = elf32_arm_hash_table (info);
  dynobj = htab->root.dynobj;
  bed = get_elf_backend_data (dynobj);
  flags = bed->dynamic_sec_flags;

  /* .iplt: read-only code section, aligned like the PLT.  */
  if (htab->root.iplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
					      flags | SEC_READONLY | SEC_CODE);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
	return FALSE;
      htab->root.iplt = s;
    }

  /* .rel(a).iplt: relocations applied to .iplt.  */
  if (htab->root.irelplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj,
					      RELOC_SECTION (htab, ".iplt"),
					      flags | SEC_READONLY);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.irelplt = s;
    }

  /* .igot.plt: GOT entries used by .iplt.  */
  if (htab->root.igotplt == NULL)
    {
      s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
      if (s == NULL
	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
	return FALSE;
      htab->root.igotplt = s;
    }
  return TRUE;
}
3508
3509 /* Determine if we're dealing with a Thumb only architecture. */
3510
3511 static bfd_boolean
3512 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3513 {
3514 int arch;
3515 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3516 Tag_CPU_arch_profile);
3517
3518 if (profile)
3519 return profile == 'M';
3520
3521 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3522
3523 if (arch == TAG_CPU_ARCH_V6_M
3524 || arch == TAG_CPU_ARCH_V6S_M
3525 || arch == TAG_CPU_ARCH_V7E_M
3526 || arch == TAG_CPU_ARCH_V8M_BASE
3527 || arch == TAG_CPU_ARCH_V8M_MAIN)
3528 return TRUE;
3529
3530 return FALSE;
3531 }
3532
3533 /* Determine if we're dealing with a Thumb-2 object. */
3534
3535 static bfd_boolean
3536 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3537 {
3538 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3539 Tag_CPU_arch);
3540 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3541 }
3542
3543 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3544 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3545 hash table. */
3546
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also selects the PLT layout (header/entry sizes) for
   VxWorks and Thumb-only targets.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Make sure the GOT-related sections exist first.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  /* Record shortcuts to the copy-reloc sections; .rel(a).bss is only
     looked up for non-PIC links.  */
  htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
  if (!bfd_link_pic (info))
    htab->srelbss = bfd_get_linker_section (dynobj,
					    RELOC_SECTION (htab, ".bss"));

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks shared links have no PLT header.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  The
	 obfd field is temporarily redirected so using_thumb_only() reads
	 DYNOBJ's attributes, then restored.  */
      bfd * saved_obfd = htab->obfd;

      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* The generic code above should have created all of these.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->sdynbss
      || (!bfd_link_pic (info) && !htab->srelbss))
    abort ();

  return TRUE;
}
3614
3615 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3616
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called by
   the generic ELF code when symbol IND becomes an indirect reference to
   (or a weak alias of) symbol DIR; the ARM-specific counts and lists
   are merged from IND into DIR.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* If EDIR already tracks P's section, fold P's counts
		 into it and unlink P from EIND's list.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Splice EDIR's old list after the unmerged remainder.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take IND's TLS type if DIR has no GOT references of its
	 own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
3681
3682 /* Destroy an ARM elf linker hash table. */
3683
3684 static void
3685 elf32_arm_link_hash_table_free (bfd *obfd)
3686 {
3687 struct elf32_arm_link_hash_table *ret
3688 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3689
3690 bfd_hash_table_free (&ret->stub_hash_table);
3691 _bfd_elf_link_hash_table_free (obfd);
3692 }
3693
3694 /* Create an ARM elf linker hash table. */
3695
/* Create an ARM elf linker hash table.  Returns NULL on allocation
   failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zeroes the table, so fields not set explicitly below
     start out as 0/NULL.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
  /* Default PLT layout; may be overridden later, e.g. by
     elf32_arm_create_dynamic_sections for VxWorks or Thumb-only
     targets.  */
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  ret->use_rel = 1;
  ret->obfd = abfd;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* NOTE(review): this presumably releases RET too, via the table
	 linked into ABFD by the init call above — confirm.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
3737
3738 /* Determine what kind of NOPs are available. */
3739
3740 static bfd_boolean
3741 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3742 {
3743 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3744 Tag_CPU_arch);
3745 return arch == TAG_CPU_ARCH_V6T2
3746 || arch == TAG_CPU_ARCH_V6K
3747 || arch == TAG_CPU_ARCH_V7
3748 || arch == TAG_CPU_ARCH_V7E_M;
3749 }
3750
3751 static bfd_boolean
3752 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3753 {
3754 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3755 Tag_CPU_arch);
3756 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3757 || arch == TAG_CPU_ARCH_V7E_M);
3758 }
3759
3760 static bfd_boolean
3761 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3762 {
3763 switch (stub_type)
3764 {
3765 case arm_stub_long_branch_thumb_only:
3766 case arm_stub_long_branch_v4t_thumb_arm:
3767 case arm_stub_short_branch_v4t_thumb_arm:
3768 case arm_stub_long_branch_v4t_thumb_arm_pic:
3769 case arm_stub_long_branch_v4t_thumb_tls_pic:
3770 case arm_stub_long_branch_thumb_only_pic:
3771 return TRUE;
3772 case arm_stub_none:
3773 BFD_FAIL ();
3774 return FALSE;
3775 break;
3776 default:
3777 return FALSE;
3778 }
3779 }
3780
3781 /* Determine the type of stub needed, if any, for a call. */
3782
3783 static enum elf32_arm_stub_type
3784 arm_type_of_stub (struct bfd_link_info *info,
3785 asection *input_sec,
3786 const Elf_Internal_Rela *rel,
3787 unsigned char st_type,
3788 enum arm_st_branch_type *actual_branch_type,
3789 struct elf32_arm_link_hash_entry *hash,
3790 bfd_vma destination,
3791 asection *sym_sec,
3792 bfd *input_bfd,
3793 const char *name)
3794 {
3795 bfd_vma location;
3796 bfd_signed_vma branch_offset;
3797 unsigned int r_type;
3798 struct elf32_arm_link_hash_table * globals;
3799 int thumb2;
3800 int thumb_only;
3801 enum elf32_arm_stub_type stub_type = arm_stub_none;
3802 int use_plt = 0;
3803 enum arm_st_branch_type branch_type = *actual_branch_type;
3804 union gotplt_union *root_plt;
3805 struct arm_plt_info *arm_plt;
3806
3807 if (branch_type == ST_BRANCH_LONG)
3808 return stub_type;
3809
3810 globals = elf32_arm_hash_table (info);
3811 if (globals == NULL)
3812 return stub_type;
3813
3814 thumb_only = using_thumb_only (globals);
3815
3816 thumb2 = using_thumb2 (globals);
3817
3818 /* Determine where the call point is. */
3819 location = (input_sec->output_offset
3820 + input_sec->output_section->vma
3821 + rel->r_offset);
3822
3823 r_type = ELF32_R_TYPE (rel->r_info);
3824
3825 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3826 are considering a function call relocation. */
3827 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3828 || r_type == R_ARM_THM_JUMP19)
3829 && branch_type == ST_BRANCH_TO_ARM)
3830 branch_type = ST_BRANCH_TO_THUMB;
3831
3832 /* For TLS call relocs, it is the caller's responsibility to provide
3833 the address of the appropriate trampoline. */
3834 if (r_type != R_ARM_TLS_CALL
3835 && r_type != R_ARM_THM_TLS_CALL
3836 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3837 &root_plt, &arm_plt)
3838 && root_plt->offset != (bfd_vma) -1)
3839 {
3840 asection *splt;
3841
3842 if (hash == NULL || hash->is_iplt)
3843 splt = globals->root.iplt;
3844 else
3845 splt = globals->root.splt;
3846 if (splt != NULL)
3847 {
3848 use_plt = 1;
3849
3850 /* Note when dealing with PLT entries: the main PLT stub is in
3851 ARM mode, so if the branch is in Thumb mode, another
3852 Thumb->ARM stub will be inserted later just before the ARM
3853 PLT stub. We don't take this extra distance into account
3854 here, because if a long branch stub is needed, we'll add a
3855 Thumb->Arm one and branch directly to the ARM PLT entry
3856 because it avoids spreading offset corrections in several
3857 places. */
3858
3859 destination = (splt->output_section->vma
3860 + splt->output_offset
3861 + root_plt->offset);
3862 st_type = STT_FUNC;
3863 branch_type = ST_BRANCH_TO_ARM;
3864 }
3865 }
3866 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3867 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3868
3869 branch_offset = (bfd_signed_vma)(destination - location);
3870
3871 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3872 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3873 {
3874 /* Handle cases where:
3875 - this call goes too far (different Thumb/Thumb2 max
3876 distance)
3877 - it's a Thumb->Arm call and blx is not available, or it's a
3878 Thumb->Arm branch (not bl). A stub is needed in this case,
3879 but only if this call is not through a PLT entry. Indeed,
3880 PLT stubs handle mode switching already.
3881 */
3882 if ((!thumb2
3883 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3884 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3885 || (thumb2
3886 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3887 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3888 || (thumb2
3889 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3890 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3891 && (r_type == R_ARM_THM_JUMP19))
3892 || (branch_type == ST_BRANCH_TO_ARM
3893 && (((r_type == R_ARM_THM_CALL
3894 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3895 || (r_type == R_ARM_THM_JUMP24)
3896 || (r_type == R_ARM_THM_JUMP19))
3897 && !use_plt))
3898 {
3899 if (branch_type == ST_BRANCH_TO_THUMB)
3900 {
3901 /* Thumb to thumb. */
3902 if (!thumb_only)
3903 {
3904 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3905 /* PIC stubs. */
3906 ? ((globals->use_blx
3907 && (r_type == R_ARM_THM_CALL))
3908 /* V5T and above. Stub starts with ARM code, so
3909 we must be able to switch mode before
3910 reaching it, which is only possible for 'bl'
3911 (ie R_ARM_THM_CALL relocation). */
3912 ? arm_stub_long_branch_any_thumb_pic
3913 /* On V4T, use Thumb code only. */
3914 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3915
3916 /* non-PIC stubs. */
3917 : ((globals->use_blx
3918 && (r_type == R_ARM_THM_CALL))
3919 /* V5T and above. */
3920 ? arm_stub_long_branch_any_any
3921 /* V4T. */
3922 : arm_stub_long_branch_v4t_thumb_thumb);
3923 }
3924 else
3925 {
3926 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3927 /* PIC stub. */
3928 ? arm_stub_long_branch_thumb_only_pic
3929 /* non-PIC stub. */
3930 : arm_stub_long_branch_thumb_only;
3931 }
3932 }
3933 else
3934 {
3935 /* Thumb to arm. */
3936 if (sym_sec != NULL
3937 && sym_sec->owner != NULL
3938 && !INTERWORK_FLAG (sym_sec->owner))
3939 {
3940 (*_bfd_error_handler)
3941 (_("%B(%s): warning: interworking not enabled.\n"
3942 " first occurrence: %B: Thumb call to ARM"),
3943 sym_sec->owner, input_bfd, name);
3944 }
3945
3946 stub_type =
3947 (bfd_link_pic (info) | globals->pic_veneer)
3948 /* PIC stubs. */
3949 ? (r_type == R_ARM_THM_TLS_CALL
3950 /* TLS PIC stubs. */
3951 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3952 : arm_stub_long_branch_v4t_thumb_tls_pic)
3953 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3954 /* V5T PIC and above. */
3955 ? arm_stub_long_branch_any_arm_pic
3956 /* V4T PIC stub. */
3957 : arm_stub_long_branch_v4t_thumb_arm_pic))
3958
3959 /* non-PIC stubs. */
3960 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3961 /* V5T and above. */
3962 ? arm_stub_long_branch_any_any
3963 /* V4T. */
3964 : arm_stub_long_branch_v4t_thumb_arm);
3965
3966 /* Handle v4t short branches. */
3967 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3968 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3969 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3970 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3971 }
3972 }
3973 }
3974 else if (r_type == R_ARM_CALL
3975 || r_type == R_ARM_JUMP24
3976 || r_type == R_ARM_PLT32
3977 || r_type == R_ARM_TLS_CALL)
3978 {
3979 if (branch_type == ST_BRANCH_TO_THUMB)
3980 {
3981 /* Arm to thumb. */
3982
3983 if (sym_sec != NULL
3984 && sym_sec->owner != NULL
3985 && !INTERWORK_FLAG (sym_sec->owner))
3986 {
3987 (*_bfd_error_handler)
3988 (_("%B(%s): warning: interworking not enabled.\n"
3989 " first occurrence: %B: ARM call to Thumb"),
3990 sym_sec->owner, input_bfd, name);
3991 }
3992
3993 /* We have an extra 2-bytes reach because of
3994 the mode change (bit 24 (H) of BLX encoding). */
3995 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3996 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3997 || (r_type == R_ARM_CALL && !globals->use_blx)
3998 || (r_type == R_ARM_JUMP24)
3999 || (r_type == R_ARM_PLT32))
4000 {
4001 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4002 /* PIC stubs. */
4003 ? ((globals->use_blx)
4004 /* V5T and above. */
4005 ? arm_stub_long_branch_any_thumb_pic
4006 /* V4T stub. */
4007 : arm_stub_long_branch_v4t_arm_thumb_pic)
4008
4009 /* non-PIC stubs. */
4010 : ((globals->use_blx)
4011 /* V5T and above. */
4012 ? arm_stub_long_branch_any_any
4013 /* V4T. */
4014 : arm_stub_long_branch_v4t_arm_thumb);
4015 }
4016 }
4017 else
4018 {
4019 /* Arm to arm. */
4020 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4021 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4022 {
4023 stub_type =
4024 (bfd_link_pic (info) | globals->pic_veneer)
4025 /* PIC stubs. */
4026 ? (r_type == R_ARM_TLS_CALL
4027 /* TLS PIC Stub. */
4028 ? arm_stub_long_branch_any_tls_pic
4029 : (globals->nacl_p
4030 ? arm_stub_long_branch_arm_nacl_pic
4031 : arm_stub_long_branch_any_arm_pic))
4032 /* non-PIC stubs. */
4033 : (globals->nacl_p
4034 ? arm_stub_long_branch_arm_nacl
4035 : arm_stub_long_branch_any_any);
4036 }
4037 }
4038 }
4039
4040 /* If a stub is needed, record the actual destination type. */
4041 if (stub_type != arm_stub_none)
4042 *actual_branch_type = branch_type;
4043
4044 return stub_type;
4045 }
4046
4047 /* Build a name for an entry in the stub hash table. */
4048
4049 static char *
4050 elf32_arm_stub_name (const asection *input_section,
4051 const asection *sym_sec,
4052 const struct elf32_arm_link_hash_entry *hash,
4053 const Elf_Internal_Rela *rel,
4054 enum elf32_arm_stub_type stub_type)
4055 {
4056 char *stub_name;
4057 bfd_size_type len;
4058
4059 if (hash)
4060 {
4061 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4062 stub_name = (char *) bfd_malloc (len);
4063 if (stub_name != NULL)
4064 sprintf (stub_name, "%08x_%s+%x_%d",
4065 input_section->id & 0xffffffff,
4066 hash->root.root.root.string,
4067 (int) rel->r_addend & 0xffffffff,
4068 (int) stub_type);
4069 }
4070 else
4071 {
4072 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4073 stub_name = (char *) bfd_malloc (len);
4074 if (stub_name != NULL)
4075 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4076 input_section->id & 0xffffffff,
4077 sym_sec->id & 0xffffffff,
4078 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4079 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4080 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4081 (int) rel->r_addend & 0xffffffff,
4082 (int) stub_type);
4083 }
4084
4085 return stub_name;
4086 }
4087
4088 /* Look up an entry in the stub hash. Stub entries are cached because
4089 creating the stub name takes a bit of time. */
4090
4091 static struct elf32_arm_stub_hash_entry *
4092 elf32_arm_get_stub_entry (const asection *input_section,
4093 const asection *sym_sec,
4094 struct elf_link_hash_entry *hash,
4095 const Elf_Internal_Rela *rel,
4096 struct elf32_arm_link_hash_table *htab,
4097 enum elf32_arm_stub_type stub_type)
4098 {
4099 struct elf32_arm_stub_hash_entry *stub_entry;
4100 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4101 const asection *id_sec;
4102
4103 if ((input_section->flags & SEC_CODE) == 0)
4104 return NULL;
4105
4106 /* If this input section is part of a group of sections sharing one
4107 stub section, then use the id of the first section in the group.
4108 Stub names need to include a section id, as there may well be
4109 more than one stub used to reach say, printf, and we need to
4110 distinguish between them. */
4111 id_sec = htab->stub_group[input_section->id].link_sec;
4112
4113 if (h != NULL && h->stub_cache != NULL
4114 && h->stub_cache->h == h
4115 && h->stub_cache->id_sec == id_sec
4116 && h->stub_cache->stub_type == stub_type)
4117 {
4118 stub_entry = h->stub_cache;
4119 }
4120 else
4121 {
4122 char *stub_name;
4123
4124 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4125 if (stub_name == NULL)
4126 return NULL;
4127
4128 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4129 stub_name, FALSE, FALSE);
4130 if (h != NULL)
4131 h->stub_cache = stub_entry;
4132
4133 free (stub_name);
4134 }
4135
4136 return stub_entry;
4137 }
4138
4139 /* Find or create a stub section. Returns a pointer to the stub section, and
4140 the section to which the stub section will be attached (in *LINK_SEC_P).
4141 LINK_SEC_P may be NULL. */
4142
4143 static asection *
4144 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4145 struct elf32_arm_link_hash_table *htab)
4146 {
4147 asection *link_sec;
4148 asection *stub_sec;
4149 asection *out_sec;
4150
4151 link_sec = htab->stub_group[section->id].link_sec;
4152 BFD_ASSERT (link_sec != NULL);
4153 stub_sec = htab->stub_group[section->id].stub_sec;
4154
4155 if (stub_sec == NULL)
4156 {
4157 stub_sec = htab->stub_group[link_sec->id].stub_sec;
4158 if (stub_sec == NULL)
4159 {
4160 size_t namelen;
4161 bfd_size_type len;
4162 char *s_name;
4163
4164 namelen = strlen (link_sec->name);
4165 len = namelen + sizeof (STUB_SUFFIX);
4166 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4167 if (s_name == NULL)
4168 return NULL;
4169
4170 memcpy (s_name, link_sec->name, namelen);
4171 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4172 out_sec = link_sec->output_section;
4173 stub_sec = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4174 htab->nacl_p ? 4 : 3);
4175 if (stub_sec == NULL)
4176 return NULL;
4177 htab->stub_group[link_sec->id].stub_sec = stub_sec;
4178 }
4179 htab->stub_group[section->id].stub_sec = stub_sec;
4180 }
4181
4182 if (link_sec_p)
4183 *link_sec_p = link_sec;
4184
4185 return stub_sec;
4186 }
4187
4188 /* Add a new stub entry to the stub hash. Not all fields of the new
4189 stub entry are initialised. */
4190
4191 static struct elf32_arm_stub_hash_entry *
4192 elf32_arm_add_stub (const char *stub_name,
4193 asection *section,
4194 struct elf32_arm_link_hash_table *htab)
4195 {
4196 asection *link_sec;
4197 asection *stub_sec;
4198 struct elf32_arm_stub_hash_entry *stub_entry;
4199
4200 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
4201 if (stub_sec == NULL)
4202 return NULL;
4203
4204 /* Enter this entry into the linker stub hash table. */
4205 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4206 TRUE, FALSE);
4207 if (stub_entry == NULL)
4208 {
4209 if (section == NULL)
4210 section = stub_sec;
4211 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4212 section->owner,
4213 stub_name);
4214 return NULL;
4215 }
4216
4217 stub_entry->stub_sec = stub_sec;
4218 stub_entry->stub_offset = 0;
4219 stub_entry->id_sec = link_sec;
4220
4221 return stub_entry;
4222 }
4223
4224 /* Store an Arm insn into an output section not processed by
4225 elf32_arm_write_section. */
4226
4227 static void
4228 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4229 bfd * output_bfd, bfd_vma val, void * ptr)
4230 {
4231 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4232 bfd_putl32 (val, ptr);
4233 else
4234 bfd_putb32 (val, ptr);
4235 }
4236
4237 /* Store a 16-bit Thumb insn into an output section not processed by
4238 elf32_arm_write_section. */
4239
4240 static void
4241 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4242 bfd * output_bfd, bfd_vma val, void * ptr)
4243 {
4244 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4245 bfd_putl16 (val, ptr);
4246 else
4247 bfd_putb16 (val, ptr);
4248 }
4249
4250 /* Store a Thumb2 insn into an output section not processed by
4251 elf32_arm_write_section. */
4252
4253 static void
4254 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4255 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4256 {
4257 /* T2 instructions are 16-bit streamed. */
4258 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4259 {
4260 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4261 bfd_putl16 ((val & 0xffff), ptr + 2);
4262 }
4263 else
4264 {
4265 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4266 bfd_putb16 ((val & 0xffff), ptr + 2);
4267 }
4268 }
4269
4270 /* If it's possible to change R_TYPE to a more efficient access
4271 model, return the new reloc type. */
4272
4273 static unsigned
4274 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4275 struct elf_link_hash_entry *h)
4276 {
4277 int is_local = (h == NULL);
4278
4279 if (bfd_link_pic (info)
4280 || (h && h->root.type == bfd_link_hash_undefweak))
4281 return r_type;
4282
4283 /* We do not support relaxations for Old TLS models. */
4284 switch (r_type)
4285 {
4286 case R_ARM_TLS_GOTDESC:
4287 case R_ARM_TLS_CALL:
4288 case R_ARM_THM_TLS_CALL:
4289 case R_ARM_TLS_DESCSEQ:
4290 case R_ARM_THM_TLS_DESCSEQ:
4291 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4292 }
4293
4294 return r_type;
4295 }
4296
4297 static bfd_reloc_status_type elf32_arm_final_link_relocate
4298 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4299 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4300 const char *, unsigned char, enum arm_st_branch_type,
4301 struct elf_link_hash_entry *, bfd_boolean *, char **);
4302
4303 static unsigned int
4304 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4305 {
4306 switch (stub_type)
4307 {
4308 case arm_stub_a8_veneer_b_cond:
4309 case arm_stub_a8_veneer_b:
4310 case arm_stub_a8_veneer_bl:
4311 return 2;
4312
4313 case arm_stub_long_branch_any_any:
4314 case arm_stub_long_branch_v4t_arm_thumb:
4315 case arm_stub_long_branch_thumb_only:
4316 case arm_stub_long_branch_v4t_thumb_thumb:
4317 case arm_stub_long_branch_v4t_thumb_arm:
4318 case arm_stub_short_branch_v4t_thumb_arm:
4319 case arm_stub_long_branch_any_arm_pic:
4320 case arm_stub_long_branch_any_thumb_pic:
4321 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4322 case arm_stub_long_branch_v4t_arm_thumb_pic:
4323 case arm_stub_long_branch_v4t_thumb_arm_pic:
4324 case arm_stub_long_branch_thumb_only_pic:
4325 case arm_stub_long_branch_any_tls_pic:
4326 case arm_stub_long_branch_v4t_thumb_tls_pic:
4327 case arm_stub_a8_veneer_blx:
4328 return 4;
4329
4330 case arm_stub_long_branch_arm_nacl:
4331 case arm_stub_long_branch_arm_nacl_pic:
4332 return 16;
4333
4334 default:
4335 abort (); /* Should be unreachable. */
4336 }
4337 }
4338
4339 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4340 veneering (TRUE) or have their own symbol (FALSE). */
4341
4342 static bfd_boolean
4343 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4344 {
4345 if (stub_type >= max_stub_type)
4346 abort (); /* Should be unreachable. */
4347
4348 return FALSE;
4349 }
4350
4351 static bfd_boolean
4352 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4353 void * in_arg)
4354 {
4355 #define MAXRELOCS 3
4356 struct elf32_arm_stub_hash_entry *stub_entry;
4357 struct elf32_arm_link_hash_table *globals;
4358 struct bfd_link_info *info;
4359 asection *stub_sec;
4360 bfd *stub_bfd;
4361 bfd_byte *loc;
4362 bfd_vma sym_value;
4363 int template_size;
4364 int size;
4365 const insn_sequence *template_sequence;
4366 int i;
4367 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4368 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4369 int nrelocs = 0;
4370
4371 /* Massage our args to the form they really have. */
4372 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4373 info = (struct bfd_link_info *) in_arg;
4374
4375 globals = elf32_arm_hash_table (info);
4376 if (globals == NULL)
4377 return FALSE;
4378
4379 stub_sec = stub_entry->stub_sec;
4380
4381 if ((globals->fix_cortex_a8 < 0)
4382 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4383 /* We have to do less-strictly-aligned fixes last. */
4384 return TRUE;
4385
4386 /* Make a note of the offset within the stubs for this entry. */
4387 stub_entry->stub_offset = stub_sec->size;
4388 loc = stub_sec->contents + stub_entry->stub_offset;
4389
4390 stub_bfd = stub_sec->owner;
4391
4392 /* This is the address of the stub destination. */
4393 sym_value = (stub_entry->target_value
4394 + stub_entry->target_section->output_offset
4395 + stub_entry->target_section->output_section->vma);
4396
4397 template_sequence = stub_entry->stub_template;
4398 template_size = stub_entry->stub_template_size;
4399
4400 size = 0;
4401 for (i = 0; i < template_size; i++)
4402 {
4403 switch (template_sequence[i].type)
4404 {
4405 case THUMB16_TYPE:
4406 {
4407 bfd_vma data = (bfd_vma) template_sequence[i].data;
4408 if (template_sequence[i].reloc_addend != 0)
4409 {
4410 /* We've borrowed the reloc_addend field to mean we should
4411 insert a condition code into this (Thumb-1 branch)
4412 instruction. See THUMB16_BCOND_INSN. */
4413 BFD_ASSERT ((data & 0xff00) == 0xd000);
4414 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4415 }
4416 bfd_put_16 (stub_bfd, data, loc + size);
4417 size += 2;
4418 }
4419 break;
4420
4421 case THUMB32_TYPE:
4422 bfd_put_16 (stub_bfd,
4423 (template_sequence[i].data >> 16) & 0xffff,
4424 loc + size);
4425 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4426 loc + size + 2);
4427 if (template_sequence[i].r_type != R_ARM_NONE)
4428 {
4429 stub_reloc_idx[nrelocs] = i;
4430 stub_reloc_offset[nrelocs++] = size;
4431 }
4432 size += 4;
4433 break;
4434
4435 case ARM_TYPE:
4436 bfd_put_32 (stub_bfd, template_sequence[i].data,
4437 loc + size);
4438 /* Handle cases where the target is encoded within the
4439 instruction. */
4440 if (template_sequence[i].r_type == R_ARM_JUMP24)
4441 {
4442 stub_reloc_idx[nrelocs] = i;
4443 stub_reloc_offset[nrelocs++] = size;
4444 }
4445 size += 4;
4446 break;
4447
4448 case DATA_TYPE:
4449 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4450 stub_reloc_idx[nrelocs] = i;
4451 stub_reloc_offset[nrelocs++] = size;
4452 size += 4;
4453 break;
4454
4455 default:
4456 BFD_FAIL ();
4457 return FALSE;
4458 }
4459 }
4460
4461 stub_sec->size += size;
4462
4463 /* Stub size has already been computed in arm_size_one_stub. Check
4464 consistency. */
4465 BFD_ASSERT (size == stub_entry->stub_size);
4466
4467 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4468 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4469 sym_value |= 1;
4470
4471 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4472 in each stub. */
4473 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4474
4475 for (i = 0; i < nrelocs; i++)
4476 {
4477 Elf_Internal_Rela rel;
4478 bfd_boolean unresolved_reloc;
4479 char *error_message;
4480 bfd_vma points_to =
4481 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4482
4483 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4484 rel.r_info = ELF32_R_INFO (0,
4485 template_sequence[stub_reloc_idx[i]].r_type);
4486 rel.r_addend = 0;
4487
4488 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4489 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4490 template should refer back to the instruction after the original
4491 branch. We use target_section as Cortex-A8 erratum workaround stubs
4492 are only generated when both source and target are in the same
4493 section. */
4494 points_to = stub_entry->target_section->output_section->vma
4495 + stub_entry->target_section->output_offset
4496 + stub_entry->source_value;
4497
4498 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4499 (template_sequence[stub_reloc_idx[i]].r_type),
4500 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4501 points_to, info, stub_entry->target_section, "", STT_FUNC,
4502 stub_entry->branch_type,
4503 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4504 &error_message);
4505 }
4506
4507 return TRUE;
4508 #undef MAXRELOCS
4509 }
4510
4511 /* Calculate the template, template size and instruction size for a stub.
4512 Return value is the instruction size. */
4513
4514 static unsigned int
4515 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4516 const insn_sequence **stub_template,
4517 int *stub_template_size)
4518 {
4519 const insn_sequence *template_sequence = NULL;
4520 int template_size = 0, i;
4521 unsigned int size;
4522
4523 template_sequence = stub_definitions[stub_type].template_sequence;
4524 if (stub_template)
4525 *stub_template = template_sequence;
4526
4527 template_size = stub_definitions[stub_type].template_size;
4528 if (stub_template_size)
4529 *stub_template_size = template_size;
4530
4531 size = 0;
4532 for (i = 0; i < template_size; i++)
4533 {
4534 switch (template_sequence[i].type)
4535 {
4536 case THUMB16_TYPE:
4537 size += 2;
4538 break;
4539
4540 case ARM_TYPE:
4541 case THUMB32_TYPE:
4542 case DATA_TYPE:
4543 size += 4;
4544 break;
4545
4546 default:
4547 BFD_FAIL ();
4548 return 0;
4549 }
4550 }
4551
4552 return size;
4553 }
4554
4555 /* As above, but don't actually build the stub. Just bump offset so
4556 we know stub section sizes. */
4557
4558 static bfd_boolean
4559 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4560 void *in_arg ATTRIBUTE_UNUSED)
4561 {
4562 struct elf32_arm_stub_hash_entry *stub_entry;
4563 const insn_sequence *template_sequence;
4564 int template_size, size;
4565
4566 /* Massage our args to the form they really have. */
4567 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4568
4569 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4570 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4571
4572 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4573 &template_size);
4574
4575 stub_entry->stub_size = size;
4576 stub_entry->stub_template = template_sequence;
4577 stub_entry->stub_template_size = template_size;
4578
4579 size = (size + 7) & ~7;
4580 stub_entry->stub_sec->size += size;
4581
4582 return TRUE;
4583 }
4584
4585 /* External entry points for sizing and building linker stubs. */
4586
4587 /* Set up various things so that we can make a list of input sections
4588 for each output section included in the link. Returns -1 on error,
4589 0 when no stubs will be needed, and 1 on success. */
4590
4591 int
4592 elf32_arm_setup_section_lists (bfd *output_bfd,
4593 struct bfd_link_info *info)
4594 {
4595 bfd *input_bfd;
4596 unsigned int bfd_count;
4597 unsigned int top_id, top_index;
4598 asection *section;
4599 asection **input_list, **list;
4600 bfd_size_type amt;
4601 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4602
4603 if (htab == NULL)
4604 return 0;
4605 if (! is_elf_hash_table (htab))
4606 return 0;
4607
4608 /* Count the number of input BFDs and find the top input section id. */
4609 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4610 input_bfd != NULL;
4611 input_bfd = input_bfd->link.next)
4612 {
4613 bfd_count += 1;
4614 for (section = input_bfd->sections;
4615 section != NULL;
4616 section = section->next)
4617 {
4618 if (top_id < section->id)
4619 top_id = section->id;
4620 }
4621 }
4622 htab->bfd_count = bfd_count;
4623
4624 amt = sizeof (struct map_stub) * (top_id + 1);
4625 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4626 if (htab->stub_group == NULL)
4627 return -1;
4628 htab->top_id = top_id;
4629
4630 /* We can't use output_bfd->section_count here to find the top output
4631 section index as some sections may have been removed, and
4632 _bfd_strip_section_from_output doesn't renumber the indices. */
4633 for (section = output_bfd->sections, top_index = 0;
4634 section != NULL;
4635 section = section->next)
4636 {
4637 if (top_index < section->index)
4638 top_index = section->index;
4639 }
4640
4641 htab->top_index = top_index;
4642 amt = sizeof (asection *) * (top_index + 1);
4643 input_list = (asection **) bfd_malloc (amt);
4644 htab->input_list = input_list;
4645 if (input_list == NULL)
4646 return -1;
4647
4648 /* For sections we aren't interested in, mark their entries with a
4649 value we can check later. */
4650 list = input_list + top_index;
4651 do
4652 *list = bfd_abs_section_ptr;
4653 while (list-- != input_list);
4654
4655 for (section = output_bfd->sections;
4656 section != NULL;
4657 section = section->next)
4658 {
4659 if ((section->flags & SEC_CODE) != 0)
4660 input_list[section->index] = NULL;
4661 }
4662
4663 return 1;
4664 }
4665
4666 /* The linker repeatedly calls this function for each input section,
4667 in the order that input sections are linked into output sections.
4668 Build lists of input sections to determine groupings between which
4669 we may insert linker stubs. */
4670
4671 void
4672 elf32_arm_next_input_section (struct bfd_link_info *info,
4673 asection *isec)
4674 {
4675 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4676
4677 if (htab == NULL)
4678 return;
4679
4680 if (isec->output_section->index <= htab->top_index)
4681 {
4682 asection **list = htab->input_list + isec->output_section->index;
4683
4684 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4685 {
4686 /* Steal the link_sec pointer for our list. */
4687 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4688 /* This happens to make the list in reverse order,
4689 which we reverse later. */
4690 PREV_SEC (isec) = *list;
4691 *list = isec;
4692 }
4693 }
4694 }
4695
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.

   Walks the per-output-section input lists built by
   elf32_arm_next_input_section; records each section's group leader
   in htab->stub_group[].link_sec and frees htab->input_list when
   done.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* Entries still holding bfd_abs_section_ptr are non-code output
	 sections that were never given a list; skip them.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Advance CURR to the last section whose end stays within
	     stub_group_size bytes of the group start.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
4796
4797 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4798 erratum fix. */
4799
4800 static int
4801 a8_reloc_compare (const void *a, const void *b)
4802 {
4803 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4804 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4805
4806 if (ra->from < rb->from)
4807 return -1;
4808 else if (ra->from > rb->from)
4809 return 1;
4810 else
4811 return 0;
4812 }
4813
4814 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4815 const char *, char **);
4816
4817 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4818 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4819 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4820 otherwise. */
4821
4822 static bfd_boolean
4823 cortex_a8_erratum_scan (bfd *input_bfd,
4824 struct bfd_link_info *info,
4825 struct a8_erratum_fix **a8_fixes_p,
4826 unsigned int *num_a8_fixes_p,
4827 unsigned int *a8_fix_table_size_p,
4828 struct a8_erratum_reloc *a8_relocs,
4829 unsigned int num_a8_relocs,
4830 unsigned prev_num_a8_fixes,
4831 bfd_boolean *stub_changed_p)
4832 {
4833 asection *section;
4834 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4835 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4836 unsigned int num_a8_fixes = *num_a8_fixes_p;
4837 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4838
4839 if (htab == NULL)
4840 return FALSE;
4841
4842 for (section = input_bfd->sections;
4843 section != NULL;
4844 section = section->next)
4845 {
4846 bfd_byte *contents = NULL;
4847 struct _arm_elf_section_data *sec_data;
4848 unsigned int span;
4849 bfd_vma base_vma;
4850
4851 if (elf_section_type (section) != SHT_PROGBITS
4852 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4853 || (section->flags & SEC_EXCLUDE) != 0
4854 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4855 || (section->output_section == bfd_abs_section_ptr))
4856 continue;
4857
4858 base_vma = section->output_section->vma + section->output_offset;
4859
4860 if (elf_section_data (section)->this_hdr.contents != NULL)
4861 contents = elf_section_data (section)->this_hdr.contents;
4862 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4863 return TRUE;
4864
4865 sec_data = elf32_arm_section_data (section);
4866
4867 for (span = 0; span < sec_data->mapcount; span++)
4868 {
4869 unsigned int span_start = sec_data->map[span].vma;
4870 unsigned int span_end = (span == sec_data->mapcount - 1)
4871 ? section->size : sec_data->map[span + 1].vma;
4872 unsigned int i;
4873 char span_type = sec_data->map[span].type;
4874 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4875
4876 if (span_type != 't')
4877 continue;
4878
4879 /* Span is entirely within a single 4KB region: skip scanning. */
4880 if (((base_vma + span_start) & ~0xfff)
4881 == ((base_vma + span_end) & ~0xfff))
4882 continue;
4883
4884 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4885
4886 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4887 * The branch target is in the same 4KB region as the
4888 first half of the branch.
4889 * The instruction before the branch is a 32-bit
4890 length non-branch instruction. */
4891 for (i = span_start; i < span_end;)
4892 {
4893 unsigned int insn = bfd_getl16 (&contents[i]);
4894 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4895 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4896
4897 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4898 insn_32bit = TRUE;
4899
4900 if (insn_32bit)
4901 {
4902 /* Load the rest of the insn (in manual-friendly order). */
4903 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4904
4905 /* Encoding T4: B<c>.W. */
4906 is_b = (insn & 0xf800d000) == 0xf0009000;
4907 /* Encoding T1: BL<c>.W. */
4908 is_bl = (insn & 0xf800d000) == 0xf000d000;
4909 /* Encoding T2: BLX<c>.W. */
4910 is_blx = (insn & 0xf800d000) == 0xf000c000;
4911 /* Encoding T3: B<c>.W (not permitted in IT block). */
4912 is_bcc = (insn & 0xf800d000) == 0xf0008000
4913 && (insn & 0x07f00000) != 0x03800000;
4914 }
4915
4916 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4917
4918 if (((base_vma + i) & 0xfff) == 0xffe
4919 && insn_32bit
4920 && is_32bit_branch
4921 && last_was_32bit
4922 && ! last_was_branch)
4923 {
4924 bfd_signed_vma offset = 0;
4925 bfd_boolean force_target_arm = FALSE;
4926 bfd_boolean force_target_thumb = FALSE;
4927 bfd_vma target;
4928 enum elf32_arm_stub_type stub_type = arm_stub_none;
4929 struct a8_erratum_reloc key, *found;
4930 bfd_boolean use_plt = FALSE;
4931
4932 key.from = base_vma + i;
4933 found = (struct a8_erratum_reloc *)
4934 bsearch (&key, a8_relocs, num_a8_relocs,
4935 sizeof (struct a8_erratum_reloc),
4936 &a8_reloc_compare);
4937
4938 if (found)
4939 {
4940 char *error_message = NULL;
4941 struct elf_link_hash_entry *entry;
4942
4943 /* We don't care about the error returned from this
4944 function, only if there is glue or not. */
4945 entry = find_thumb_glue (info, found->sym_name,
4946 &error_message);
4947
4948 if (entry)
4949 found->non_a8_stub = TRUE;
4950
4951 /* Keep a simpler condition, for the sake of clarity. */
4952 if (htab->root.splt != NULL && found->hash != NULL
4953 && found->hash->root.plt.offset != (bfd_vma) -1)
4954 use_plt = TRUE;
4955
4956 if (found->r_type == R_ARM_THM_CALL)
4957 {
4958 if (found->branch_type == ST_BRANCH_TO_ARM
4959 || use_plt)
4960 force_target_arm = TRUE;
4961 else
4962 force_target_thumb = TRUE;
4963 }
4964 }
4965
4966 /* Check if we have an offending branch instruction. */
4967
4968 if (found && found->non_a8_stub)
4969 /* We've already made a stub for this instruction, e.g.
4970 it's a long branch or a Thumb->ARM stub. Assume that
4971 stub will suffice to work around the A8 erratum (see
4972 setting of always_after_branch above). */
4973 ;
4974 else if (is_bcc)
4975 {
4976 offset = (insn & 0x7ff) << 1;
4977 offset |= (insn & 0x3f0000) >> 4;
4978 offset |= (insn & 0x2000) ? 0x40000 : 0;
4979 offset |= (insn & 0x800) ? 0x80000 : 0;
4980 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4981 if (offset & 0x100000)
4982 offset |= ~ ((bfd_signed_vma) 0xfffff);
4983 stub_type = arm_stub_a8_veneer_b_cond;
4984 }
4985 else if (is_b || is_bl || is_blx)
4986 {
4987 int s = (insn & 0x4000000) != 0;
4988 int j1 = (insn & 0x2000) != 0;
4989 int j2 = (insn & 0x800) != 0;
4990 int i1 = !(j1 ^ s);
4991 int i2 = !(j2 ^ s);
4992
4993 offset = (insn & 0x7ff) << 1;
4994 offset |= (insn & 0x3ff0000) >> 4;
4995 offset |= i2 << 22;
4996 offset |= i1 << 23;
4997 offset |= s << 24;
4998 if (offset & 0x1000000)
4999 offset |= ~ ((bfd_signed_vma) 0xffffff);
5000
5001 if (is_blx)
5002 offset &= ~ ((bfd_signed_vma) 3);
5003
5004 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5005 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5006 }
5007
5008 if (stub_type != arm_stub_none)
5009 {
5010 bfd_vma pc_for_insn = base_vma + i + 4;
5011
5012 /* The original instruction is a BL, but the target is
5013 an ARM instruction. If we were not making a stub,
5014 the BL would have been converted to a BLX. Use the
5015 BLX stub instead in that case. */
5016 if (htab->use_blx && force_target_arm
5017 && stub_type == arm_stub_a8_veneer_bl)
5018 {
5019 stub_type = arm_stub_a8_veneer_blx;
5020 is_blx = TRUE;
5021 is_bl = FALSE;
5022 }
5023 /* Conversely, if the original instruction was
5024 BLX but the target is Thumb mode, use the BL
5025 stub. */
5026 else if (force_target_thumb
5027 && stub_type == arm_stub_a8_veneer_blx)
5028 {
5029 stub_type = arm_stub_a8_veneer_bl;
5030 is_blx = FALSE;
5031 is_bl = TRUE;
5032 }
5033
5034 if (is_blx)
5035 pc_for_insn &= ~ ((bfd_vma) 3);
5036
5037 /* If we found a relocation, use the proper destination,
5038 not the offset in the (unrelocated) instruction.
5039 Note this is always done if we switched the stub type
5040 above. */
5041 if (found)
5042 offset =
5043 (bfd_signed_vma) (found->destination - pc_for_insn);
5044
5045 /* If the stub will use a Thumb-mode branch to a
5046 PLT target, redirect it to the preceding Thumb
5047 entry point. */
5048 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5049 offset -= PLT_THUMB_STUB_SIZE;
5050
5051 target = pc_for_insn + offset;
5052
5053 /* The BLX stub is ARM-mode code. Adjust the offset to
5054 take the different PC value (+8 instead of +4) into
5055 account. */
5056 if (stub_type == arm_stub_a8_veneer_blx)
5057 offset += 4;
5058
5059 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5060 {
5061 char *stub_name = NULL;
5062
5063 if (num_a8_fixes == a8_fix_table_size)
5064 {
5065 a8_fix_table_size *= 2;
5066 a8_fixes = (struct a8_erratum_fix *)
5067 bfd_realloc (a8_fixes,
5068 sizeof (struct a8_erratum_fix)
5069 * a8_fix_table_size);
5070 }
5071
5072 if (num_a8_fixes < prev_num_a8_fixes)
5073 {
5074 /* If we're doing a subsequent scan,
5075 check if we've found the same fix as
5076 before, and try and reuse the stub
5077 name. */
5078 stub_name = a8_fixes[num_a8_fixes].stub_name;
5079 if ((a8_fixes[num_a8_fixes].section != section)
5080 || (a8_fixes[num_a8_fixes].offset != i))
5081 {
5082 free (stub_name);
5083 stub_name = NULL;
5084 *stub_changed_p = TRUE;
5085 }
5086 }
5087
5088 if (!stub_name)
5089 {
5090 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5091 if (stub_name != NULL)
5092 sprintf (stub_name, "%x:%x", section->id, i);
5093 }
5094
5095 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5096 a8_fixes[num_a8_fixes].section = section;
5097 a8_fixes[num_a8_fixes].offset = i;
5098 a8_fixes[num_a8_fixes].target_offset =
5099 target - base_vma;
5100 a8_fixes[num_a8_fixes].orig_insn = insn;
5101 a8_fixes[num_a8_fixes].stub_name = stub_name;
5102 a8_fixes[num_a8_fixes].stub_type = stub_type;
5103 a8_fixes[num_a8_fixes].branch_type =
5104 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5105
5106 num_a8_fixes++;
5107 }
5108 }
5109 }
5110
5111 i += insn_32bit ? 4 : 2;
5112 last_was_32bit = insn_32bit;
5113 last_was_branch = is_32bit_branch;
5114 }
5115 }
5116
5117 if (elf_section_data (section)->this_hdr.contents == NULL)
5118 free (contents);
5119 }
5120
5121 *a8_fixes_p = a8_fixes;
5122 *num_a8_fixes_p = num_a8_fixes;
5123 *a8_fix_table_size_p = a8_fix_table_size;
5124
5125 return FALSE;
5126 }
5127
5128 /* Create or update a stub entry depending on whether the stub can already be
5129 found in HTAB. The stub is identified by:
5130 - its type STUB_TYPE
5131 - its source branch (note that several can share the same stub) whose
5132 section and relocation (if any) are given by SECTION and IRELA
5133 respectively
5134 - its target symbol whose input section, hash, name, value and branch type
5135 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5136 respectively
5137
5138 If found, the value of the stub's target symbol is updated from SYM_VALUE
5139 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5140 TRUE and the stub entry is initialized.
5141
5142 Returns whether the stub could be successfully created or updated, or FALSE
   if an error occurred.  */
5144
static bfd_boolean
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  /* A "claimed" stub type uses the target symbol name itself as the hash
     key, so STUB_NAME below aliases SYM_NAME (caller-owned) and must NOT
     be freed on any of the early-return paths.  */
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  Ownership of the malloc'd name is
	 ours until it is handed to the stub hash table.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return FALSE;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      /* The hash table keeps its own copy of the key, so our name is
	 no longer needed.  */
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return TRUE;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return FALSE;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* Size the buffer for the longest of the three format strings used
	 below; THUMB2ARM_GLUE_ENTRY_NAME is assumed to be the longest.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
		   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return FALSE;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return TRUE;
}
5238
5239 /* Determine and set the size of the stub section for a final link.
5240
5241 The basic idea here is to examine all the relocations looking for
5242 PC-relative calls to a target that is unreachable with a "bl"
5243 instruction. */
5244
5245 bfd_boolean
5246 elf32_arm_size_stubs (bfd *output_bfd,
5247 bfd *stub_bfd,
5248 struct bfd_link_info *info,
5249 bfd_signed_vma group_size,
5250 asection * (*add_stub_section) (const char *, asection *,
5251 asection *,
5252 unsigned int),
5253 void (*layout_sections_again) (void))
5254 {
5255 bfd_size_type stub_group_size;
5256 bfd_boolean stubs_always_after_branch;
5257 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5258 struct a8_erratum_fix *a8_fixes = NULL;
5259 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5260 struct a8_erratum_reloc *a8_relocs = NULL;
5261 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5262
5263 if (htab == NULL)
5264 return FALSE;
5265
5266 if (htab->fix_cortex_a8)
5267 {
5268 a8_fixes = (struct a8_erratum_fix *)
5269 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5270 a8_relocs = (struct a8_erratum_reloc *)
5271 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5272 }
5273
5274 /* Propagate mach to stub bfd, because it may not have been
5275 finalized when we created stub_bfd. */
5276 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5277 bfd_get_mach (output_bfd));
5278
5279 /* Stash our params away. */
5280 htab->stub_bfd = stub_bfd;
5281 htab->add_stub_section = add_stub_section;
5282 htab->layout_sections_again = layout_sections_again;
5283 stubs_always_after_branch = group_size < 0;
5284
5285 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5286 as the first half of a 32-bit branch straddling two 4K pages. This is a
5287 crude way of enforcing that. */
5288 if (htab->fix_cortex_a8)
5289 stubs_always_after_branch = 1;
5290
5291 if (group_size < 0)
5292 stub_group_size = -group_size;
5293 else
5294 stub_group_size = group_size;
5295
5296 if (stub_group_size == 1)
5297 {
5298 /* Default values. */
5299 /* Thumb branch range is +-4MB has to be used as the default
5300 maximum size (a given section can contain both ARM and Thumb
5301 code, so the worst case has to be taken into account).
5302
5303 This value is 24K less than that, which allows for 2025
5304 12-byte stubs. If we exceed that, then we will fail to link.
5305 The user will have to relink with an explicit group size
5306 option. */
5307 stub_group_size = 4170000;
5308 }
5309
5310 group_sections (htab, stub_group_size, stubs_always_after_branch);
5311
5312 /* If we're applying the cortex A8 fix, we need to determine the
5313 program header size now, because we cannot change it later --
5314 that could alter section placements. Notice the A8 erratum fix
5315 ends up requiring the section addresses to remain unchanged
5316 modulo the page size. That's something we cannot represent
5317 inside BFD, and we don't want to force the section alignment to
5318 be the page size. */
5319 if (htab->fix_cortex_a8)
5320 (*htab->layout_sections_again) ();
5321
5322 while (1)
5323 {
5324 bfd *input_bfd;
5325 unsigned int bfd_indx;
5326 asection *stub_sec;
5327 bfd_boolean stub_changed = FALSE;
5328 unsigned prev_num_a8_fixes = num_a8_fixes;
5329
5330 num_a8_fixes = 0;
5331 for (input_bfd = info->input_bfds, bfd_indx = 0;
5332 input_bfd != NULL;
5333 input_bfd = input_bfd->link.next, bfd_indx++)
5334 {
5335 Elf_Internal_Shdr *symtab_hdr;
5336 asection *section;
5337 Elf_Internal_Sym *local_syms = NULL;
5338
5339 if (!is_arm_elf (input_bfd))
5340 continue;
5341
5342 num_a8_relocs = 0;
5343
5344 /* We'll need the symbol table in a second. */
5345 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5346 if (symtab_hdr->sh_info == 0)
5347 continue;
5348
5349 /* Walk over each section attached to the input bfd. */
5350 for (section = input_bfd->sections;
5351 section != NULL;
5352 section = section->next)
5353 {
5354 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5355
5356 /* If there aren't any relocs, then there's nothing more
5357 to do. */
5358 if ((section->flags & SEC_RELOC) == 0
5359 || section->reloc_count == 0
5360 || (section->flags & SEC_CODE) == 0)
5361 continue;
5362
5363 /* If this section is a link-once section that will be
5364 discarded, then don't create any stubs. */
5365 if (section->output_section == NULL
5366 || section->output_section->owner != output_bfd)
5367 continue;
5368
5369 /* Get the relocs. */
5370 internal_relocs
5371 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5372 NULL, info->keep_memory);
5373 if (internal_relocs == NULL)
5374 goto error_ret_free_local;
5375
5376 /* Now examine each relocation. */
5377 irela = internal_relocs;
5378 irelaend = irela + section->reloc_count;
5379 for (; irela < irelaend; irela++)
5380 {
5381 unsigned int r_type, r_indx;
5382 enum elf32_arm_stub_type stub_type;
5383 asection *sym_sec;
5384 bfd_vma sym_value;
5385 bfd_vma destination;
5386 struct elf32_arm_link_hash_entry *hash;
5387 const char *sym_name;
5388 unsigned char st_type;
5389 enum arm_st_branch_type branch_type;
5390 bfd_boolean created_stub = FALSE;
5391
5392 r_type = ELF32_R_TYPE (irela->r_info);
5393 r_indx = ELF32_R_SYM (irela->r_info);
5394
5395 if (r_type >= (unsigned int) R_ARM_max)
5396 {
5397 bfd_set_error (bfd_error_bad_value);
5398 error_ret_free_internal:
5399 if (elf_section_data (section)->relocs == NULL)
5400 free (internal_relocs);
5401 /* Fall through. */
5402 error_ret_free_local:
5403 if (local_syms != NULL
5404 && (symtab_hdr->contents
5405 != (unsigned char *) local_syms))
5406 free (local_syms);
5407 return FALSE;
5408 }
5409
5410 hash = NULL;
5411 if (r_indx >= symtab_hdr->sh_info)
5412 hash = elf32_arm_hash_entry
5413 (elf_sym_hashes (input_bfd)
5414 [r_indx - symtab_hdr->sh_info]);
5415
5416 /* Only look for stubs on branch instructions, or
5417 non-relaxed TLSCALL */
5418 if ((r_type != (unsigned int) R_ARM_CALL)
5419 && (r_type != (unsigned int) R_ARM_THM_CALL)
5420 && (r_type != (unsigned int) R_ARM_JUMP24)
5421 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5422 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5423 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5424 && (r_type != (unsigned int) R_ARM_PLT32)
5425 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5426 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5427 && r_type == elf32_arm_tls_transition
5428 (info, r_type, &hash->root)
5429 && ((hash ? hash->tls_type
5430 : (elf32_arm_local_got_tls_type
5431 (input_bfd)[r_indx]))
5432 & GOT_TLS_GDESC) != 0))
5433 continue;
5434
5435 /* Now determine the call target, its name, value,
5436 section. */
5437 sym_sec = NULL;
5438 sym_value = 0;
5439 destination = 0;
5440 sym_name = NULL;
5441
5442 if (r_type == (unsigned int) R_ARM_TLS_CALL
5443 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5444 {
5445 /* A non-relaxed TLS call. The target is the
5446 plt-resident trampoline and nothing to do
5447 with the symbol. */
5448 BFD_ASSERT (htab->tls_trampoline > 0);
5449 sym_sec = htab->root.splt;
5450 sym_value = htab->tls_trampoline;
5451 hash = 0;
5452 st_type = STT_FUNC;
5453 branch_type = ST_BRANCH_TO_ARM;
5454 }
5455 else if (!hash)
5456 {
5457 /* It's a local symbol. */
5458 Elf_Internal_Sym *sym;
5459
5460 if (local_syms == NULL)
5461 {
5462 local_syms
5463 = (Elf_Internal_Sym *) symtab_hdr->contents;
5464 if (local_syms == NULL)
5465 local_syms
5466 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5467 symtab_hdr->sh_info, 0,
5468 NULL, NULL, NULL);
5469 if (local_syms == NULL)
5470 goto error_ret_free_internal;
5471 }
5472
5473 sym = local_syms + r_indx;
5474 if (sym->st_shndx == SHN_UNDEF)
5475 sym_sec = bfd_und_section_ptr;
5476 else if (sym->st_shndx == SHN_ABS)
5477 sym_sec = bfd_abs_section_ptr;
5478 else if (sym->st_shndx == SHN_COMMON)
5479 sym_sec = bfd_com_section_ptr;
5480 else
5481 sym_sec =
5482 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5483
5484 if (!sym_sec)
5485 /* This is an undefined symbol. It can never
5486 be resolved. */
5487 continue;
5488
5489 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5490 sym_value = sym->st_value;
5491 destination = (sym_value + irela->r_addend
5492 + sym_sec->output_offset
5493 + sym_sec->output_section->vma);
5494 st_type = ELF_ST_TYPE (sym->st_info);
5495 branch_type =
5496 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5497 sym_name
5498 = bfd_elf_string_from_elf_section (input_bfd,
5499 symtab_hdr->sh_link,
5500 sym->st_name);
5501 }
5502 else
5503 {
5504 /* It's an external symbol. */
5505 while (hash->root.root.type == bfd_link_hash_indirect
5506 || hash->root.root.type == bfd_link_hash_warning)
5507 hash = ((struct elf32_arm_link_hash_entry *)
5508 hash->root.root.u.i.link);
5509
5510 if (hash->root.root.type == bfd_link_hash_defined
5511 || hash->root.root.type == bfd_link_hash_defweak)
5512 {
5513 sym_sec = hash->root.root.u.def.section;
5514 sym_value = hash->root.root.u.def.value;
5515
5516 struct elf32_arm_link_hash_table *globals =
5517 elf32_arm_hash_table (info);
5518
5519 /* For a destination in a shared library,
5520 use the PLT stub as target address to
5521 decide whether a branch stub is
5522 needed. */
5523 if (globals != NULL
5524 && globals->root.splt != NULL
5525 && hash != NULL
5526 && hash->root.plt.offset != (bfd_vma) -1)
5527 {
5528 sym_sec = globals->root.splt;
5529 sym_value = hash->root.plt.offset;
5530 if (sym_sec->output_section != NULL)
5531 destination = (sym_value
5532 + sym_sec->output_offset
5533 + sym_sec->output_section->vma);
5534 }
5535 else if (sym_sec->output_section != NULL)
5536 destination = (sym_value + irela->r_addend
5537 + sym_sec->output_offset
5538 + sym_sec->output_section->vma);
5539 }
5540 else if ((hash->root.root.type == bfd_link_hash_undefined)
5541 || (hash->root.root.type == bfd_link_hash_undefweak))
5542 {
5543 /* For a shared library, use the PLT stub as
5544 target address to decide whether a long
5545 branch stub is needed.
5546 For absolute code, they cannot be handled. */
5547 struct elf32_arm_link_hash_table *globals =
5548 elf32_arm_hash_table (info);
5549
5550 if (globals != NULL
5551 && globals->root.splt != NULL
5552 && hash != NULL
5553 && hash->root.plt.offset != (bfd_vma) -1)
5554 {
5555 sym_sec = globals->root.splt;
5556 sym_value = hash->root.plt.offset;
5557 if (sym_sec->output_section != NULL)
5558 destination = (sym_value
5559 + sym_sec->output_offset
5560 + sym_sec->output_section->vma);
5561 }
5562 else
5563 continue;
5564 }
5565 else
5566 {
5567 bfd_set_error (bfd_error_bad_value);
5568 goto error_ret_free_internal;
5569 }
5570 st_type = hash->root.type;
5571 branch_type =
5572 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5573 sym_name = hash->root.root.root.string;
5574 }
5575
5576 do
5577 {
5578 bfd_boolean new_stub;
5579
5580 /* Determine what (if any) linker stub is needed. */
5581 stub_type = arm_type_of_stub (info, section, irela,
5582 st_type, &branch_type,
5583 hash, destination, sym_sec,
5584 input_bfd, sym_name);
5585 if (stub_type == arm_stub_none)
5586 break;
5587
5588 /* We've either created a stub for this reloc already,
5589 or we are about to. */
5590 created_stub =
5591 elf32_arm_create_stub (htab, stub_type, section, irela,
5592 sym_sec, hash,
5593 (char *) sym_name, sym_value,
5594 branch_type, &new_stub);
5595
5596 if (!created_stub)
5597 goto error_ret_free_internal;
5598 else if (!new_stub)
5599 break;
5600 else
5601 stub_changed = TRUE;
5602 }
5603 while (0);
5604
5605 /* Look for relocations which might trigger Cortex-A8
5606 erratum. */
5607 if (htab->fix_cortex_a8
5608 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5609 || r_type == (unsigned int) R_ARM_THM_JUMP19
5610 || r_type == (unsigned int) R_ARM_THM_CALL
5611 || r_type == (unsigned int) R_ARM_THM_XPC22))
5612 {
5613 bfd_vma from = section->output_section->vma
5614 + section->output_offset
5615 + irela->r_offset;
5616
5617 if ((from & 0xfff) == 0xffe)
5618 {
5619 /* Found a candidate. Note we haven't checked the
5620 destination is within 4K here: if we do so (and
5621 don't create an entry in a8_relocs) we can't tell
5622 that a branch should have been relocated when
5623 scanning later. */
5624 if (num_a8_relocs == a8_reloc_table_size)
5625 {
5626 a8_reloc_table_size *= 2;
5627 a8_relocs = (struct a8_erratum_reloc *)
5628 bfd_realloc (a8_relocs,
5629 sizeof (struct a8_erratum_reloc)
5630 * a8_reloc_table_size);
5631 }
5632
5633 a8_relocs[num_a8_relocs].from = from;
5634 a8_relocs[num_a8_relocs].destination = destination;
5635 a8_relocs[num_a8_relocs].r_type = r_type;
5636 a8_relocs[num_a8_relocs].branch_type = branch_type;
5637 a8_relocs[num_a8_relocs].sym_name = sym_name;
5638 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5639 a8_relocs[num_a8_relocs].hash = hash;
5640
5641 num_a8_relocs++;
5642 }
5643 }
5644 }
5645
5646 /* We're done with the internal relocs, free them. */
5647 if (elf_section_data (section)->relocs == NULL)
5648 free (internal_relocs);
5649 }
5650
5651 if (htab->fix_cortex_a8)
5652 {
5653 /* Sort relocs which might apply to Cortex-A8 erratum. */
5654 qsort (a8_relocs, num_a8_relocs,
5655 sizeof (struct a8_erratum_reloc),
5656 &a8_reloc_compare);
5657
5658 /* Scan for branches which might trigger Cortex-A8 erratum. */
5659 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5660 &num_a8_fixes, &a8_fix_table_size,
5661 a8_relocs, num_a8_relocs,
5662 prev_num_a8_fixes, &stub_changed)
5663 != 0)
5664 goto error_ret_free_local;
5665 }
5666 }
5667
5668 if (prev_num_a8_fixes != num_a8_fixes)
5669 stub_changed = TRUE;
5670
5671 if (!stub_changed)
5672 break;
5673
5674 /* OK, we've added some stubs. Find out the new size of the
5675 stub sections. */
5676 for (stub_sec = htab->stub_bfd->sections;
5677 stub_sec != NULL;
5678 stub_sec = stub_sec->next)
5679 {
5680 /* Ignore non-stub sections. */
5681 if (!strstr (stub_sec->name, STUB_SUFFIX))
5682 continue;
5683
5684 stub_sec->size = 0;
5685 }
5686
5687 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5688
5689 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5690 if (htab->fix_cortex_a8)
5691 for (i = 0; i < num_a8_fixes; i++)
5692 {
5693 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5694 a8_fixes[i].section, htab);
5695
5696 if (stub_sec == NULL)
5697 goto error_ret_free_local;
5698
5699 stub_sec->size
5700 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5701 NULL);
5702 }
5703
5704
5705 /* Ask the linker to do its stuff. */
5706 (*htab->layout_sections_again) ();
5707 }
5708
5709 /* Add stubs for Cortex-A8 erratum fixes now. */
5710 if (htab->fix_cortex_a8)
5711 {
5712 for (i = 0; i < num_a8_fixes; i++)
5713 {
5714 struct elf32_arm_stub_hash_entry *stub_entry;
5715 char *stub_name = a8_fixes[i].stub_name;
5716 asection *section = a8_fixes[i].section;
5717 unsigned int section_id = a8_fixes[i].section->id;
5718 asection *link_sec = htab->stub_group[section_id].link_sec;
5719 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5720 const insn_sequence *template_sequence;
5721 int template_size, size = 0;
5722
5723 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5724 TRUE, FALSE);
5725 if (stub_entry == NULL)
5726 {
5727 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5728 section->owner,
5729 stub_name);
5730 return FALSE;
5731 }
5732
5733 stub_entry->stub_sec = stub_sec;
5734 stub_entry->stub_offset = 0;
5735 stub_entry->id_sec = link_sec;
5736 stub_entry->stub_type = a8_fixes[i].stub_type;
5737 stub_entry->source_value = a8_fixes[i].offset;
5738 stub_entry->target_section = a8_fixes[i].section;
5739 stub_entry->target_value = a8_fixes[i].target_offset;
5740 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5741 stub_entry->branch_type = a8_fixes[i].branch_type;
5742
5743 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5744 &template_sequence,
5745 &template_size);
5746
5747 stub_entry->stub_size = size;
5748 stub_entry->stub_template = template_sequence;
5749 stub_entry->stub_template_size = template_size;
5750 }
5751
5752 /* Stash the Cortex-A8 erratum fix array for use later in
5753 elf32_arm_write_section(). */
5754 htab->a8_erratum_fixes = a8_fixes;
5755 htab->num_a8_erratum_fixes = num_a8_fixes;
5756 }
5757 else
5758 {
5759 htab->a8_erratum_fixes = NULL;
5760 htab->num_a8_erratum_fixes = 0;
5761 }
5762 return TRUE;
5763 }
5764
5765 /* Build all the stubs associated with the current output file. The
5766 stubs are kept in a hash table attached to the main linker hash
5767 table. We also set up the .plt entries for statically linked PIC
5768 functions here. This function is called via arm_elf_finish in the
5769 linker. */
5770
5771 bfd_boolean
5772 elf32_arm_build_stubs (struct bfd_link_info *info)
5773 {
5774 asection *stub_sec;
5775 struct bfd_hash_table *table;
5776 struct elf32_arm_link_hash_table *htab;
5777
5778 htab = elf32_arm_hash_table (info);
5779 if (htab == NULL)
5780 return FALSE;
5781
5782 for (stub_sec = htab->stub_bfd->sections;
5783 stub_sec != NULL;
5784 stub_sec = stub_sec->next)
5785 {
5786 bfd_size_type size;
5787
5788 /* Ignore non-stub sections. */
5789 if (!strstr (stub_sec->name, STUB_SUFFIX))
5790 continue;
5791
5792 /* Allocate memory to hold the linker stubs. */
5793 size = stub_sec->size;
5794 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5795 if (stub_sec->contents == NULL && size != 0)
5796 return FALSE;
5797 stub_sec->size = 0;
5798 }
5799
5800 /* Build the stubs as directed by the stub hash table. */
5801 table = &htab->stub_hash_table;
5802 bfd_hash_traverse (table, arm_build_one_stub, info);
5803 if (htab->fix_cortex_a8)
5804 {
5805 /* Place the cortex a8 stubs last. */
5806 htab->fix_cortex_a8 = -1;
5807 bfd_hash_traverse (table, arm_build_one_stub, info);
5808 }
5809
5810 return TRUE;
5811 }
5812
5813 /* Locate the Thumb encoded calling stub for NAME. */
5814
5815 static struct elf_link_hash_entry *
5816 find_thumb_glue (struct bfd_link_info *link_info,
5817 const char *name,
5818 char **error_message)
5819 {
5820 char *tmp_name;
5821 struct elf_link_hash_entry *hash;
5822 struct elf32_arm_link_hash_table *hash_table;
5823
5824 /* We need a pointer to the armelf specific hash table. */
5825 hash_table = elf32_arm_hash_table (link_info);
5826 if (hash_table == NULL)
5827 return NULL;
5828
5829 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5830 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5831
5832 BFD_ASSERT (tmp_name);
5833
5834 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5835
5836 hash = elf_link_hash_lookup
5837 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5838
5839 if (hash == NULL
5840 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5841 tmp_name, name) == -1)
5842 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5843
5844 free (tmp_name);
5845
5846 return hash;
5847 }
5848
5849 /* Locate the ARM encoded calling stub for NAME. */
5850
5851 static struct elf_link_hash_entry *
5852 find_arm_glue (struct bfd_link_info *link_info,
5853 const char *name,
5854 char **error_message)
5855 {
5856 char *tmp_name;
5857 struct elf_link_hash_entry *myh;
5858 struct elf32_arm_link_hash_table *hash_table;
5859
5860 /* We need a pointer to the elfarm specific hash table. */
5861 hash_table = elf32_arm_hash_table (link_info);
5862 if (hash_table == NULL)
5863 return NULL;
5864
5865 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5866 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5867
5868 BFD_ASSERT (tmp_name);
5869
5870 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5871
5872 myh = elf_link_hash_lookup
5873 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5874
5875 if (myh == NULL
5876 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5877 tmp_name, name) == -1)
5878 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5879
5880 free (tmp_name);
5881
5882 return myh;
5883 }
5884
5885 /* ARM->Thumb glue (static images):
5886
5887 .arm
5888 __func_from_arm:
5889 ldr r12, __func_addr
5890 bx r12
5891 __func_addr:
5892 .word func @ behave as if you saw a ARM_32 reloc.
5893
5894 (v5t static images)
5895 .arm
5896 __func_from_arm:
5897 ldr pc, __func_addr
5898 __func_addr:
5899 .word func @ behave as if you saw a ARM_32 reloc.
5900
5901 (relocatable images)
5902 .arm
5903 __func_from_arm:
5904 ldr r12, __func_offset
5905 add r12, r12, pc
5906 bx r12
5907 __func_offset:
5908 .word func - . */
5909
/* Instruction templates for the ARM->Thumb glue sequences shown above;
   the trailing comments map each word to a line of the diagrams.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	/* ldr r12, __func_addr */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word func (Thumb bit set; relocated like ARM_32) */

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word func (Thumb bit set) */

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
  __func_from_thumb:		     __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				     ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				     __func_addr:
					.word	     func  */

/* Templates for the Thumb->ARM glue (left-hand sequence above).  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop */
static const insn32 t2a3_b_insn = 0xea000000;	/* b func (branch offset filled in later) */

/* Sizes of the erratum-workaround veneers emitted by this backend.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* BX-veneer templates; register fields presumably patched in by the
   veneer builder elsewhere in this file -- TODO confirm.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1 */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN */
5953
5954 #ifndef ELFARM_NABI_C_INCLUDED
5955 static void
5956 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5957 {
5958 asection * s;
5959 bfd_byte * contents;
5960
5961 if (size == 0)
5962 {
5963 /* Do not include empty glue sections in the output. */
5964 if (abfd != NULL)
5965 {
5966 s = bfd_get_linker_section (abfd, name);
5967 if (s != NULL)
5968 s->flags |= SEC_EXCLUDE;
5969 }
5970 return;
5971 }
5972
5973 BFD_ASSERT (abfd != NULL);
5974
5975 s = bfd_get_linker_section (abfd, name);
5976 BFD_ASSERT (s != NULL);
5977
5978 contents = (bfd_byte *) bfd_alloc (abfd, size);
5979
5980 BFD_ASSERT (s->size == size);
5981 s->contents = contents;
5982 }
5983
5984 bfd_boolean
5985 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5986 {
5987 struct elf32_arm_link_hash_table * globals;
5988
5989 globals = elf32_arm_hash_table (info);
5990 BFD_ASSERT (globals != NULL);
5991
5992 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5993 globals->arm_glue_size,
5994 ARM2THUMB_GLUE_SECTION_NAME);
5995
5996 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5997 globals->thumb_glue_size,
5998 THUMB2ARM_GLUE_SECTION_NAME);
5999
6000 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6001 globals->vfp11_erratum_glue_size,
6002 VFP11_ERRATUM_VENEER_SECTION_NAME);
6003
6004 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6005 globals->stm32l4xx_erratum_glue_size,
6006 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6007
6008 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6009 globals->bx_glue_size,
6010 ARM_BX_GLUE_SECTION_NAME);
6011
6012 return TRUE;
6013 }
6014
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   H is the hash entry of the Thumb function being called; a stub is
   shared by all calls to the same symbol.  Returns the symbol
   identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the stub's symbol name from NAME.  The buffer is large enough
     because ARM2THUMB_GLUE_ENTRY_NAME is a format string into which NAME
     is substituted by the sprintf below.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* The name was copied into the hash table (copy == TRUE above), so the
     local buffer can be released.  */
  free (tmp_name);

  /* Choose the stub flavour: the PIC sequence for shared objects,
     relocatable executables or when PIC veneers were requested; otherwise
     the v5 static sequence when BLX is usable, else the plain static one
     (see the a2t* instruction constants above for the sequences).  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for the stub; its contents are emitted later.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
6087
6088 /* Allocate space for ARMv4 BX veneers. */
6089
6090 static void
6091 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
6092 {
6093 asection * s;
6094 struct elf32_arm_link_hash_table *globals;
6095 char *tmp_name;
6096 struct elf_link_hash_entry *myh;
6097 struct bfd_link_hash_entry *bh;
6098 bfd_vma val;
6099
6100 /* BX PC does not need a veneer. */
6101 if (reg == 15)
6102 return;
6103
6104 globals = elf32_arm_hash_table (link_info);
6105 BFD_ASSERT (globals != NULL);
6106 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6107
6108 /* Check if this veneer has already been allocated. */
6109 if (globals->bx_glue_offset[reg])
6110 return;
6111
6112 s = bfd_get_linker_section
6113 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
6114
6115 BFD_ASSERT (s != NULL);
6116
6117 /* Add symbol for veneer. */
6118 tmp_name = (char *)
6119 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
6120
6121 BFD_ASSERT (tmp_name);
6122
6123 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
6124
6125 myh = elf_link_hash_lookup
6126 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
6127
6128 BFD_ASSERT (myh == NULL);
6129
6130 bh = NULL;
6131 val = globals->bx_glue_size;
6132 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6133 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6134 NULL, TRUE, FALSE, &bh);
6135
6136 myh = (struct elf_link_hash_entry *) bh;
6137 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6138 myh->forced_local = 1;
6139
6140 s->size += ARM_BX_VENEER_SIZE;
6141 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
6142 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
6143 }
6144
6145
6146 /* Add an entry to the code/data map for section SEC. */
6147
6148 static void
6149 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6150 {
6151 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6152 unsigned int newidx;
6153
6154 if (sec_data->map == NULL)
6155 {
6156 sec_data->map = (elf32_arm_section_map *)
6157 bfd_malloc (sizeof (elf32_arm_section_map));
6158 sec_data->mapcount = 0;
6159 sec_data->mapsize = 1;
6160 }
6161
6162 newidx = sec_data->mapcount++;
6163
6164 if (sec_data->mapcount > sec_data->mapsize)
6165 {
6166 sec_data->mapsize *= 2;
6167 sec_data->map = (elf32_arm_section_map *)
6168 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6169 * sizeof (elf32_arm_section_map));
6170 }
6171
6172 if (sec_data->map)
6173 {
6174 sec_data->map[newidx].vma = vma;
6175 sec_data->map[newidx].type = type;
6176 }
6177 }
6178
6179
6180 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6181 veneers are handled for now. */
6182
6183 static bfd_vma
6184 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6185 elf32_vfp11_erratum_list *branch,
6186 bfd *branch_bfd,
6187 asection *branch_sec,
6188 unsigned int offset)
6189 {
6190 asection *s;
6191 struct elf32_arm_link_hash_table *hash_table;
6192 char *tmp_name;
6193 struct elf_link_hash_entry *myh;
6194 struct bfd_link_hash_entry *bh;
6195 bfd_vma val;
6196 struct _arm_elf_section_data *sec_data;
6197 elf32_vfp11_erratum_list *newerr;
6198
6199 hash_table = elf32_arm_hash_table (link_info);
6200 BFD_ASSERT (hash_table != NULL);
6201 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6202
6203 s = bfd_get_linker_section
6204 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6205
6206 sec_data = elf32_arm_section_data (s);
6207
6208 BFD_ASSERT (s != NULL);
6209
6210 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6211 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6212
6213 BFD_ASSERT (tmp_name);
6214
6215 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6216 hash_table->num_vfp11_fixes);
6217
6218 myh = elf_link_hash_lookup
6219 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6220
6221 BFD_ASSERT (myh == NULL);
6222
6223 bh = NULL;
6224 val = hash_table->vfp11_erratum_glue_size;
6225 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6226 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6227 NULL, TRUE, FALSE, &bh);
6228
6229 myh = (struct elf_link_hash_entry *) bh;
6230 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6231 myh->forced_local = 1;
6232
6233 /* Link veneer back to calling location. */
6234 sec_data->erratumcount += 1;
6235 newerr = (elf32_vfp11_erratum_list *)
6236 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6237
6238 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6239 newerr->vma = -1;
6240 newerr->u.v.branch = branch;
6241 newerr->u.v.id = hash_table->num_vfp11_fixes;
6242 branch->u.b.veneer = newerr;
6243
6244 newerr->next = sec_data->erratumlist;
6245 sec_data->erratumlist = newerr;
6246
6247 /* A symbol for the return from the veneer. */
6248 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6249 hash_table->num_vfp11_fixes);
6250
6251 myh = elf_link_hash_lookup
6252 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6253
6254 if (myh != NULL)
6255 abort ();
6256
6257 bh = NULL;
6258 val = offset + 4;
6259 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6260 branch_sec, val, NULL, TRUE, FALSE, &bh);
6261
6262 myh = (struct elf_link_hash_entry *) bh;
6263 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6264 myh->forced_local = 1;
6265
6266 free (tmp_name);
6267
6268 /* Generate a mapping symbol for the veneer section, and explicitly add an
6269 entry for that symbol to the code/data map for the section. */
6270 if (hash_table->vfp11_erratum_glue_size == 0)
6271 {
6272 bh = NULL;
6273 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6274 ever requires this erratum fix. */
6275 _bfd_generic_link_add_one_symbol (link_info,
6276 hash_table->bfd_of_glue_owner, "$a",
6277 BSF_LOCAL, s, 0, NULL,
6278 TRUE, FALSE, &bh);
6279
6280 myh = (struct elf_link_hash_entry *) bh;
6281 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6282 myh->forced_local = 1;
6283
6284 /* The elf32_arm_init_maps function only cares about symbols from input
6285 BFDs. We must make a note of this generated mapping symbol
6286 ourselves so that code byteswapping works properly in
6287 elf32_arm_write_section. */
6288 elf32_arm_section_map_add (s, 'a', 0);
6289 }
6290
6291 s->size += VFP11_ERRATUM_VENEER_SIZE;
6292 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6293 hash_table->num_vfp11_fixes++;
6294
6295 /* The offset of the veneer. */
6296 return val;
6297 }
6298
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   BRANCH is the erratum record for the offending instruction, which lives
   at OFFSET within BRANCH_SEC of BRANCH_BFD; VENEER_SIZE is the size of
   the veneer to reserve (LDM and VLDM veneers differ in size).  Defines a
   symbol for the veneer and one for the return point, and links the
   veneer into the glue section's erratum list.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* +10 leaves room for the expansion of the veneer number substituted
     into the entry-name format by the sprintf below.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the veneer glue,
     which is where this veneer will be emitted.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  /* The return point is the instruction after the erratum site.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was reassigned above to
     OFFSET + 4 (the return-point value), so this actually returns that,
     not the veneer's glue offset -- confirm callers expect this before
     relying on the comment.  */
  return val;
}
6417
6418 #define ARM_GLUE_SECTION_FLAGS \
6419 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6420 | SEC_READONLY | SEC_LINKER_CREATED)
6421
6422 /* Create a fake section for use by the ARM backend of the linker. */
6423
6424 static bfd_boolean
6425 arm_make_glue_section (bfd * abfd, const char * name)
6426 {
6427 asection * sec;
6428
6429 sec = bfd_get_linker_section (abfd, name);
6430 if (sec != NULL)
6431 /* Already made. */
6432 return TRUE;
6433
6434 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6435
6436 if (sec == NULL
6437 || !bfd_set_section_alignment (abfd, sec, 2))
6438 return FALSE;
6439
6440 /* Set the gc mark to prevent the section from being removed by garbage
6441 collection, despite the fact that no relocs refer to this section. */
6442 sec->gc_mark = 1;
6443
6444 return TRUE;
6445 }
6446
/* Set size of .plt entries to the long variant.  This function is called
   from the linker scripts in ld/emultempl/{armelf}.em.  */

void
bfd_elf32_arm_use_long_plt (void)
{
  elf32_arm_use_long_plt_entry = TRUE;
}
6455
6456 /* Add the glue sections to ABFD. This function is called from the
6457 linker scripts in ld/emultempl/{armelf}.em. */
6458
6459 bfd_boolean
6460 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6461 struct bfd_link_info *info)
6462 {
6463 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6464 bfd_boolean dostm32l4xx = globals
6465 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6466 bfd_boolean addglue;
6467
6468 /* If we are only performing a partial
6469 link do not bother adding the glue. */
6470 if (bfd_link_relocatable (info))
6471 return TRUE;
6472
6473 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6474 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6475 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6476 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6477
6478 if (!dostm32l4xx)
6479 return addglue;
6480
6481 return addglue
6482 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6483 }
6484
6485 /* Select a BFD to be used to hold the sections used by the glue code.
6486 This function is called from the linker scripts in ld/emultempl/
6487 {armelf/pe}.em. */
6488
6489 bfd_boolean
6490 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6491 {
6492 struct elf32_arm_link_hash_table *globals;
6493
6494 /* If we are only performing a partial link
6495 do not bother getting a bfd to hold the glue. */
6496 if (bfd_link_relocatable (info))
6497 return TRUE;
6498
6499 /* Make sure we don't attach the glue sections to a dynamic object. */
6500 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6501
6502 globals = elf32_arm_hash_table (info);
6503 BFD_ASSERT (globals != NULL);
6504
6505 if (globals->bfd_of_glue_owner != NULL)
6506 return TRUE;
6507
6508 /* Save the bfd for later use. */
6509 globals->bfd_of_glue_owner = abfd;
6510
6511 return TRUE;
6512 }
6513
6514 static void
6515 check_use_blx (struct elf32_arm_link_hash_table *globals)
6516 {
6517 int cpu_arch;
6518
6519 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6520 Tag_CPU_arch);
6521
6522 if (globals->fix_arm1176)
6523 {
6524 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6525 globals->use_blx = 1;
6526 }
6527 else
6528 {
6529 if (cpu_arch > TAG_CPU_ARCH_V4T)
6530 globals->use_blx = 1;
6531 }
6532 }
6533
/* Scan the relocations of every section of ABFD, before section sizes
   are fixed, and record any ARM->Thumb interworking glue or ARMv4 BX
   veneers that those relocations show will be needed.  Returns FALSE on
   error (failed reloc read or section load).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX is only acted on when fix_v4bx >= 2.  */
	  if (   r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The register operated on is in the low nibble of the
		 BX instruction; record a veneer for it.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Only free buffers we allocated ourselves, not cached copies
	 owned by the section data.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
6694 #endif
6695
6696
6697 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6698
6699 void
6700 bfd_elf32_arm_init_maps (bfd *abfd)
6701 {
6702 Elf_Internal_Sym *isymbuf;
6703 Elf_Internal_Shdr *hdr;
6704 unsigned int i, localsyms;
6705
6706 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6707 if (! is_arm_elf (abfd))
6708 return;
6709
6710 if ((abfd->flags & DYNAMIC) != 0)
6711 return;
6712
6713 hdr = & elf_symtab_hdr (abfd);
6714 localsyms = hdr->sh_info;
6715
6716 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6717 should contain the number of local symbols, which should come before any
6718 global symbols. Mapping symbols are always local. */
6719 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6720 NULL);
6721
6722 /* No internal symbols read? Skip this BFD. */
6723 if (isymbuf == NULL)
6724 return;
6725
6726 for (i = 0; i < localsyms; i++)
6727 {
6728 Elf_Internal_Sym *isym = &isymbuf[i];
6729 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6730 const char *name;
6731
6732 if (sec != NULL
6733 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6734 {
6735 name = bfd_elf_string_from_elf_section (abfd,
6736 hdr->sh_link, isym->st_name);
6737
6738 if (bfd_is_arm_special_symbol_name (name,
6739 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6740 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6741 }
6742 }
6743 }
6744
6745
6746 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6747 say what they wanted. */
6748
6749 void
6750 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6751 {
6752 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6753 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6754
6755 if (globals == NULL)
6756 return;
6757
6758 if (globals->fix_cortex_a8 == -1)
6759 {
6760 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6761 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6762 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6763 || out_attr[Tag_CPU_arch_profile].i == 0))
6764 globals->fix_cortex_a8 = 1;
6765 else
6766 globals->fix_cortex_a8 = 0;
6767 }
6768 }
6769
6770
6771 void
6772 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6773 {
6774 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6775 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6776
6777 if (globals == NULL)
6778 return;
6779 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6780 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6781 {
6782 switch (globals->vfp11_fix)
6783 {
6784 case BFD_ARM_VFP11_FIX_DEFAULT:
6785 case BFD_ARM_VFP11_FIX_NONE:
6786 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6787 break;
6788
6789 default:
6790 /* Give a warning, but do as the user requests anyway. */
6791 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6792 "workaround is not necessary for target architecture"), obfd);
6793 }
6794 }
6795 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6796 /* For earlier architectures, we might need the workaround, but do not
6797 enable it by default. If users is running with broken hardware, they
6798 must enable the erratum fix explicitly. */
6799 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6800 }
6801
6802 void
6803 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6804 {
6805 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6806 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6807
6808 if (globals == NULL)
6809 return;
6810
6811 /* We assume only Cortex-M4 may require the fix. */
6812 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6813 || out_attr[Tag_CPU_arch_profile].i != 'M')
6814 {
6815 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6816 /* Give a warning, but do as the user requests anyway. */
6817 (*_bfd_error_handler)
6818 (_("%B: warning: selected STM32L4XX erratum "
6819 "workaround is not necessary for target architecture"), obfd);
6820 }
6821 }
6822
/* Classification of a VFP11 instruction by the pipeline it uses, as
   assigned by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipe (also add/sub, compares, converts).  */
  VFP11_LS,	/* Load/store and register-transfer pipe.  */
  VFP11_DS,	/* Divide/square-root pipe.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
6830
6831 /* Return a VFP register number. This is encoded as RX:X for single-precision
6832 registers, or X:RX for double-precision registers, where RX is the group of
6833 four bits in the instruction encoding and X is the single extension bit.
6834 RX and X fields are specified using their lowest (starting) bit. The return
6835 value is:
6836
6837 0...31: single-precision registers s0...s31
6838 32...63: double-precision registers d0...d31.
6839
6840 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6841 encounter VFP3 instructions, so we allow the full range for DP registers. */
6842
6843 static unsigned int
6844 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6845 unsigned int x)
6846 {
6847 if (is_double)
6848 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6849 else
6850 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6851 }
6852
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Single-precision s0..s31: one bit per register.  Use an unsigned
       constant: "1 << 31" would shift into the sign bit of a signed int,
       which is undefined behaviour.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Double-precision d0..d15 overlays two SP registers, so set both
       bits (again unsigned: "3 << 30" overflows a signed int).  Numbers
       48 and above (d16-d31) are deliberately ignored.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
6864
6865 /* Return TRUE if WMASK overwrites anything in REGS. */
6866
6867 static bfd_boolean
6868 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6869 {
6870 int i;
6871
6872 for (i = 0; i < numregs; i++)
6873 {
6874 unsigned int reg = regs[i];
6875
6876 if (reg < 32 && (wmask & (1 << reg)) != 0)
6877 return TRUE;
6878
6879 reg -= 32;
6880
6881 if (reg >= 16)
6882 continue;
6883
6884 if ((wmask & (3 << (reg * 2))) != 0)
6885 return TRUE;
6886 }
6887
6888 return FALSE;
6889 }
6890
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   INSN is the (ARM-encoded) instruction word.  Written registers are
   OR-ed into *DESTMASK; input registers (for instructions that can
   bounce due to underflow) are stored in REGS with the count in
   *NUMREGS.  Returns the pipeline classification, or VFP11_BAD if INSN
   is not a recognised VFP11 instruction.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select double-precision operands.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one selector value.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Multiply-accumulate: reads Fd, Fn and Fm, writes Fd.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-operand arithmetic: reads Fn and Fm, writes Fd.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* The extension opcode lives in bits 19:16 and bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers, i.e. a
	 write to Fm (both halves for the single-precision pair).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits: P (bit 24), U (bit 23), W (bit 21).  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Multiple load: the low byte holds the transfer length
	       (register count for SP, twice the count for DP).  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
7082
7083
7084 static int elf32_arm_compare_mapping (const void * a, const void * b);
7085
7086
7087 /* Look for potentially-troublesome code sequences which might trigger the
7088 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7089 (available from ARM) for details of the erratum. A short version is
7090 described in ld.texinfo. */
7091
7092 bfd_boolean
7093 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7094 {
7095 asection *sec;
7096 bfd_byte *contents = NULL;
7097 int state = 0;
7098 int regs[3], numregs = 0;
7099 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7100 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7101
7102 if (globals == NULL)
7103 return FALSE;
7104
7105 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7106 The states transition as follows:
7107
7108 0 -> 1 (vector) or 0 -> 2 (scalar)
7109 A VFP FMAC-pipeline instruction has been seen. Fill
7110 regs[0]..regs[numregs-1] with its input operands. Remember this
7111 instruction in 'first_fmac'.
7112
7113 1 -> 2
7114 Any instruction, except for a VFP instruction which overwrites
7115 regs[*].
7116
7117 1 -> 3 [ -> 0 ] or
7118 2 -> 3 [ -> 0 ]
7119 A VFP instruction has been seen which overwrites any of regs[*].
7120 We must make a veneer! Reset state to 0 before examining next
7121 instruction.
7122
7123 2 -> 0
7124 If we fail to match anything in state 2, reset to state 0 and reset
7125 the instruction pointer to the instruction after 'first_fmac'.
7126
7127 If the VFP11 vector mode is in use, there must be at least two unrelated
7128 instructions between anti-dependent VFP11 instructions to properly avoid
7129 triggering the erratum, hence the use of the extra state 1. */
7130
7131 /* If we are only performing a partial link do not bother
7132 to construct any glue. */
7133 if (bfd_link_relocatable (link_info))
7134 return TRUE;
7135
7136 /* Skip if this bfd does not correspond to an ELF image. */
7137 if (! is_arm_elf (abfd))
7138 return TRUE;
7139
7140 /* We should have chosen a fix type by the time we get here. */
7141 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7142
7143 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7144 return TRUE;
7145
7146 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7147 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7148 return TRUE;
7149
7150 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7151 {
7152 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7153 struct _arm_elf_section_data *sec_data;
7154
7155 /* If we don't have executable progbits, we're not interested in this
7156 section. Also skip if section is to be excluded. */
7157 if (elf_section_type (sec) != SHT_PROGBITS
7158 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7159 || (sec->flags & SEC_EXCLUDE) != 0
7160 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7161 || sec->output_section == bfd_abs_section_ptr
7162 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7163 continue;
7164
7165 sec_data = elf32_arm_section_data (sec);
7166
7167 if (sec_data->mapcount == 0)
7168 continue;
7169
7170 if (elf_section_data (sec)->this_hdr.contents != NULL)
7171 contents = elf_section_data (sec)->this_hdr.contents;
7172 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7173 goto error_return;
7174
7175 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7176 elf32_arm_compare_mapping);
7177
7178 for (span = 0; span < sec_data->mapcount; span++)
7179 {
7180 unsigned int span_start = sec_data->map[span].vma;
7181 unsigned int span_end = (span == sec_data->mapcount - 1)
7182 ? sec->size : sec_data->map[span + 1].vma;
7183 char span_type = sec_data->map[span].type;
7184
7185 /* FIXME: Only ARM mode is supported at present. We may need to
7186 support Thumb-2 mode also at some point. */
7187 if (span_type != 'a')
7188 continue;
7189
7190 for (i = span_start; i < span_end;)
7191 {
7192 unsigned int next_i = i + 4;
7193 unsigned int insn = bfd_big_endian (abfd)
7194 ? (contents[i] << 24)
7195 | (contents[i + 1] << 16)
7196 | (contents[i + 2] << 8)
7197 | contents[i + 3]
7198 : (contents[i + 3] << 24)
7199 | (contents[i + 2] << 16)
7200 | (contents[i + 1] << 8)
7201 | contents[i];
7202 unsigned int writemask = 0;
7203 enum bfd_arm_vfp11_pipe vpipe;
7204
7205 switch (state)
7206 {
7207 case 0:
7208 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7209 &numregs);
7210 /* I'm assuming the VFP11 erratum can trigger with denorm
7211 operands on either the FMAC or the DS pipeline. This might
7212 lead to slightly overenthusiastic veneer insertion. */
7213 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7214 {
7215 state = use_vector ? 1 : 2;
7216 first_fmac = i;
7217 veneer_of_insn = insn;
7218 }
7219 break;
7220
7221 case 1:
7222 {
7223 int other_regs[3], other_numregs;
7224 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7225 other_regs,
7226 &other_numregs);
7227 if (vpipe != VFP11_BAD
7228 && bfd_arm_vfp11_antidependency (writemask, regs,
7229 numregs))
7230 state = 3;
7231 else
7232 state = 2;
7233 }
7234 break;
7235
7236 case 2:
7237 {
7238 int other_regs[3], other_numregs;
7239 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7240 other_regs,
7241 &other_numregs);
7242 if (vpipe != VFP11_BAD
7243 && bfd_arm_vfp11_antidependency (writemask, regs,
7244 numregs))
7245 state = 3;
7246 else
7247 {
7248 state = 0;
7249 next_i = first_fmac + 4;
7250 }
7251 }
7252 break;
7253
7254 case 3:
7255 abort (); /* Should be unreachable. */
7256 }
7257
7258 if (state == 3)
7259 {
7260 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7261 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7262
7263 elf32_arm_section_data (sec)->erratumcount += 1;
7264
7265 newerr->u.b.vfp_insn = veneer_of_insn;
7266
7267 switch (span_type)
7268 {
7269 case 'a':
7270 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7271 break;
7272
7273 default:
7274 abort ();
7275 }
7276
7277 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7278 first_fmac);
7279
7280 newerr->vma = -1;
7281
7282 newerr->next = sec_data->erratumlist;
7283 sec_data->erratumlist = newerr;
7284
7285 state = 0;
7286 }
7287
7288 i = next_i;
7289 }
7290 }
7291
7292 if (contents != NULL
7293 && elf_section_data (sec)->this_hdr.contents != contents)
7294 free (contents);
7295 contents = NULL;
7296 }
7297
7298 return TRUE;
7299
7300 error_return:
7301 if (contents != NULL
7302 && elf_section_data (sec)->this_hdr.contents != contents)
7303 free (contents);
7304
7305 return FALSE;
7306 }
7307
7308 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7309 after sections have been laid out, using specially-named symbols. */
7310
7311 void
7312 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7313 struct bfd_link_info *link_info)
7314 {
7315 asection *sec;
7316 struct elf32_arm_link_hash_table *globals;
7317 char *tmp_name;
7318
7319 if (bfd_link_relocatable (link_info))
7320 return;
7321
7322 /* Skip if this bfd does not correspond to an ELF image. */
7323 if (! is_arm_elf (abfd))
7324 return;
7325
7326 globals = elf32_arm_hash_table (link_info);
7327 if (globals == NULL)
7328 return;
7329
7330 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7331 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7332
7333 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7334 {
7335 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7336 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7337
7338 for (; errnode != NULL; errnode = errnode->next)
7339 {
7340 struct elf_link_hash_entry *myh;
7341 bfd_vma vma;
7342
7343 switch (errnode->type)
7344 {
7345 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7346 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7347 /* Find veneer symbol. */
7348 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7349 errnode->u.b.veneer->u.v.id);
7350
7351 myh = elf_link_hash_lookup
7352 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7353
7354 if (myh == NULL)
7355 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7356 "`%s'"), abfd, tmp_name);
7357
7358 vma = myh->root.u.def.section->output_section->vma
7359 + myh->root.u.def.section->output_offset
7360 + myh->root.u.def.value;
7361
7362 errnode->u.b.veneer->vma = vma;
7363 break;
7364
7365 case VFP11_ERRATUM_ARM_VENEER:
7366 case VFP11_ERRATUM_THUMB_VENEER:
7367 /* Find return location. */
7368 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7369 errnode->u.v.id);
7370
7371 myh = elf_link_hash_lookup
7372 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7373
7374 if (myh == NULL)
7375 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7376 "`%s'"), abfd, tmp_name);
7377
7378 vma = myh->root.u.def.section->output_section->vma
7379 + myh->root.u.def.section->output_offset
7380 + myh->root.u.def.value;
7381
7382 errnode->u.v.branch->vma = vma;
7383 break;
7384
7385 default:
7386 abort ();
7387 }
7388 }
7389 }
7390
7391 free (tmp_name);
7392 }
7393
7394 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7395 return locations after sections have been laid out, using
7396 specially-named symbols. */
7397
7398 void
7399 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7400 struct bfd_link_info *link_info)
7401 {
7402 asection *sec;
7403 struct elf32_arm_link_hash_table *globals;
7404 char *tmp_name;
7405
7406 if (bfd_link_relocatable (link_info))
7407 return;
7408
7409 /* Skip if this bfd does not correspond to an ELF image. */
7410 if (! is_arm_elf (abfd))
7411 return;
7412
7413 globals = elf32_arm_hash_table (link_info);
7414 if (globals == NULL)
7415 return;
7416
7417 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7418 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7419
7420 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7421 {
7422 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7423 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7424
7425 for (; errnode != NULL; errnode = errnode->next)
7426 {
7427 struct elf_link_hash_entry *myh;
7428 bfd_vma vma;
7429
7430 switch (errnode->type)
7431 {
7432 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7433 /* Find veneer symbol. */
7434 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7435 errnode->u.b.veneer->u.v.id);
7436
7437 myh = elf_link_hash_lookup
7438 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7439
7440 if (myh == NULL)
7441 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7442 "`%s'"), abfd, tmp_name);
7443
7444 vma = myh->root.u.def.section->output_section->vma
7445 + myh->root.u.def.section->output_offset
7446 + myh->root.u.def.value;
7447
7448 errnode->u.b.veneer->vma = vma;
7449 break;
7450
7451 case STM32L4XX_ERRATUM_VENEER:
7452 /* Find return location. */
7453 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7454 errnode->u.v.id);
7455
7456 myh = elf_link_hash_lookup
7457 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7458
7459 if (myh == NULL)
7460 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7461 "`%s'"), abfd, tmp_name);
7462
7463 vma = myh->root.u.def.section->output_section->vma
7464 + myh->root.u.def.section->output_offset
7465 + myh->root.u.def.value;
7466
7467 errnode->u.v.branch->vma = vma;
7468 break;
7469
7470 default:
7471 abort ();
7472 }
7473 }
7474 }
7475
7476 free (tmp_name);
7477 }
7478
7479 static inline bfd_boolean
7480 is_thumb2_ldmia (const insn32 insn)
7481 {
7482 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7483 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7484 return (insn & 0xffd02000) == 0xe8900000;
7485 }
7486
7487 static inline bfd_boolean
7488 is_thumb2_ldmdb (const insn32 insn)
7489 {
7490 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7491 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7492 return (insn & 0xffd02000) == 0xe9100000;
7493 }
7494
7495 static inline bfd_boolean
7496 is_thumb2_vldm (const insn32 insn)
7497 {
7498 /* A6.5 Extension register load or store instruction
7499 A7.7.229
7500 We look for SP 32-bit and DP 64-bit registers.
7501 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7502 <list> is consecutive 64-bit registers
7503 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7504 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7505 <list> is consecutive 32-bit registers
7506 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7507 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7508 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7509 return
7510 (((insn & 0xfe100f00) == 0xec100b00) ||
7511 ((insn & 0xfe100f00) == 0xec100a00))
7512 && /* (IA without !). */
7513 (((((insn << 7) >> 28) & 0xd) == 0x4)
7514 /* (IA with !), includes VPOP (when reg number is SP). */
7515 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7516 /* (DB with !). */
7517 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7518 }
7519
7520 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7521 VLDM opcode and:
7522 - computes the number and the mode of memory accesses
7523 - decides if the replacement should be done:
7524 . replaces only if > 8-word accesses
7525 . or (testing purposes only) replaces all accesses. */
7526
7527 static bfd_boolean
7528 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7529 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7530 {
7531 int nb_words = 0;
7532
7533 /* The field encoding the register list is the same for both LDMIA
7534 and LDMDB encodings. */
7535 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7536 nb_words = popcount (insn & 0x0000ffff);
7537 else if (is_thumb2_vldm (insn))
7538 nb_words = (insn & 0xff);
7539
7540 /* DEFAULT mode accounts for the real bug condition situation,
7541 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7542 return
7543 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7544 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7545 }
7546
7547 /* Look for potentially-troublesome code sequences which might trigger
7548 the STM STM32L4XX erratum. */
7549
7550 bfd_boolean
7551 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
7552 struct bfd_link_info *link_info)
7553 {
7554 asection *sec;
7555 bfd_byte *contents = NULL;
7556 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7557
7558 if (globals == NULL)
7559 return FALSE;
7560
7561 /* If we are only performing a partial link do not bother
7562 to construct any glue. */
7563 if (bfd_link_relocatable (link_info))
7564 return TRUE;
7565
7566 /* Skip if this bfd does not correspond to an ELF image. */
7567 if (! is_arm_elf (abfd))
7568 return TRUE;
7569
7570 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
7571 return TRUE;
7572
7573 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7574 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7575 return TRUE;
7576
7577 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7578 {
7579 unsigned int i, span;
7580 struct _arm_elf_section_data *sec_data;
7581
7582 /* If we don't have executable progbits, we're not interested in this
7583 section. Also skip if section is to be excluded. */
7584 if (elf_section_type (sec) != SHT_PROGBITS
7585 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7586 || (sec->flags & SEC_EXCLUDE) != 0
7587 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7588 || sec->output_section == bfd_abs_section_ptr
7589 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
7590 continue;
7591
7592 sec_data = elf32_arm_section_data (sec);
7593
7594 if (sec_data->mapcount == 0)
7595 continue;
7596
7597 if (elf_section_data (sec)->this_hdr.contents != NULL)
7598 contents = elf_section_data (sec)->this_hdr.contents;
7599 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7600 goto error_return;
7601
7602 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7603 elf32_arm_compare_mapping);
7604
7605 for (span = 0; span < sec_data->mapcount; span++)
7606 {
7607 unsigned int span_start = sec_data->map[span].vma;
7608 unsigned int span_end = (span == sec_data->mapcount - 1)
7609 ? sec->size : sec_data->map[span + 1].vma;
7610 char span_type = sec_data->map[span].type;
7611 int itblock_current_pos = 0;
7612
7613 /* Only Thumb2 mode need be supported with this CM4 specific
7614 code, we should not encounter any arm mode eg span_type
7615 != 'a'. */
7616 if (span_type != 't')
7617 continue;
7618
7619 for (i = span_start; i < span_end;)
7620 {
7621 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
7622 bfd_boolean insn_32bit = FALSE;
7623 bfd_boolean is_ldm = FALSE;
7624 bfd_boolean is_vldm = FALSE;
7625 bfd_boolean is_not_last_in_it_block = FALSE;
7626
7627 /* The first 16-bits of all 32-bit thumb2 instructions start
7628 with opcode[15..13]=0b111 and the encoded op1 can be anything
7629 except opcode[12..11]!=0b00.
7630 See 32-bit Thumb instruction encoding. */
7631 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
7632 insn_32bit = TRUE;
7633
7634 /* Compute the predicate that tells if the instruction
7635 is concerned by the IT block
7636 - Creates an error if there is a ldm that is not
7637 last in the IT block thus cannot be replaced
7638 - Otherwise we can create a branch at the end of the
7639 IT block, it will be controlled naturally by IT
7640 with the proper pseudo-predicate
7641 - So the only interesting predicate is the one that
7642 tells that we are not on the last item of an IT
7643 block. */
7644 if (itblock_current_pos != 0)
7645 is_not_last_in_it_block = !!--itblock_current_pos;
7646
7647 if (insn_32bit)
7648 {
7649 /* Load the rest of the insn (in manual-friendly order). */
7650 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
7651 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
7652 is_vldm = is_thumb2_vldm (insn);
7653
7654 /* Veneers are created for (v)ldm depending on
7655 option flags and memory accesses conditions; but
7656 if the instruction is not the last instruction of
7657 an IT block, we cannot create a jump there, so we
7658 bail out. */
7659 if ((is_ldm || is_vldm) &&
7660 stm32l4xx_need_create_replacing_stub
7661 (insn, globals->stm32l4xx_fix))
7662 {
7663 if (is_not_last_in_it_block)
7664 {
7665 (*_bfd_error_handler)
7666 /* Note - overlong line used here to allow for translation. */
7667 (_("\
7668 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7669 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7670 abfd, sec, (long)i);
7671 }
7672 else
7673 {
7674 elf32_stm32l4xx_erratum_list *newerr =
7675 (elf32_stm32l4xx_erratum_list *)
7676 bfd_zmalloc
7677 (sizeof (elf32_stm32l4xx_erratum_list));
7678
7679 elf32_arm_section_data (sec)
7680 ->stm32l4xx_erratumcount += 1;
7681 newerr->u.b.insn = insn;
7682 /* We create only thumb branches. */
7683 newerr->type =
7684 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
7685 record_stm32l4xx_erratum_veneer
7686 (link_info, newerr, abfd, sec,
7687 i,
7688 is_ldm ?
7689 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
7690 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
7691 newerr->vma = -1;
7692 newerr->next = sec_data->stm32l4xx_erratumlist;
7693 sec_data->stm32l4xx_erratumlist = newerr;
7694 }
7695 }
7696 }
7697 else
7698 {
7699 /* A7.7.37 IT p208
7700 IT blocks are only encoded in T1
7701 Encoding T1: IT{x{y{z}}} <firstcond>
7702 1 0 1 1 - 1 1 1 1 - firstcond - mask
7703 if mask = '0000' then see 'related encodings'
7704 We don't deal with UNPREDICTABLE, just ignore these.
7705 There can be no nested IT blocks so an IT block
7706 is naturally a new one for which it is worth
7707 computing its size. */
7708 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
7709 ((insn & 0x000f) != 0x0000);
7710 /* If we have a new IT block we compute its size. */
7711 if (is_newitblock)
7712 {
7713 /* Compute the number of instructions controlled
7714 by the IT block, it will be used to decide
7715 whether we are inside an IT block or not. */
7716 unsigned int mask = insn & 0x000f;
7717 itblock_current_pos = 4 - ctz (mask);
7718 }
7719 }
7720
7721 i += insn_32bit ? 4 : 2;
7722 }
7723 }
7724
7725 if (contents != NULL
7726 && elf_section_data (sec)->this_hdr.contents != contents)
7727 free (contents);
7728 contents = NULL;
7729 }
7730
7731 return TRUE;
7732
7733 error_return:
7734 if (contents != NULL
7735 && elf_section_data (sec)->this_hdr.contents != contents)
7736 free (contents);
7737
7738 return FALSE;
7739 }
7740
7741 /* Set target relocation values needed during linking. */
7742
7743 void
7744 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7745 struct bfd_link_info *link_info,
7746 int target1_is_rel,
7747 char * target2_type,
7748 int fix_v4bx,
7749 int use_blx,
7750 bfd_arm_vfp11_fix vfp11_fix,
7751 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7752 int no_enum_warn, int no_wchar_warn,
7753 int pic_veneer, int fix_cortex_a8,
7754 int fix_arm1176)
7755 {
7756 struct elf32_arm_link_hash_table *globals;
7757
7758 globals = elf32_arm_hash_table (link_info);
7759 if (globals == NULL)
7760 return;
7761
7762 globals->target1_is_rel = target1_is_rel;
7763 if (strcmp (target2_type, "rel") == 0)
7764 globals->target2_reloc = R_ARM_REL32;
7765 else if (strcmp (target2_type, "abs") == 0)
7766 globals->target2_reloc = R_ARM_ABS32;
7767 else if (strcmp (target2_type, "got-rel") == 0)
7768 globals->target2_reloc = R_ARM_GOT_PREL;
7769 else
7770 {
7771 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7772 target2_type);
7773 }
7774 globals->fix_v4bx = fix_v4bx;
7775 globals->use_blx |= use_blx;
7776 globals->vfp11_fix = vfp11_fix;
7777 globals->stm32l4xx_fix = stm32l4xx_fix;
7778 globals->pic_veneer = pic_veneer;
7779 globals->fix_cortex_a8 = fix_cortex_a8;
7780 globals->fix_arm1176 = fix_arm1176;
7781
7782 BFD_ASSERT (is_arm_elf (output_bfd));
7783 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7784 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7785 }
7786
7787 /* Replace the target offset of a Thumb bl or b.w instruction. */
7788
7789 static void
7790 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
7791 {
7792 bfd_vma upper;
7793 bfd_vma lower;
7794 int reloc_sign;
7795
7796 BFD_ASSERT ((offset & 1) == 0);
7797
7798 upper = bfd_get_16 (abfd, insn);
7799 lower = bfd_get_16 (abfd, insn + 2);
7800 reloc_sign = (offset < 0) ? 1 : 0;
7801 upper = (upper & ~(bfd_vma) 0x7ff)
7802 | ((offset >> 12) & 0x3ff)
7803 | (reloc_sign << 10);
7804 lower = (lower & ~(bfd_vma) 0x2fff)
7805 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
7806 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
7807 | ((offset >> 1) & 0x7ff);
7808 bfd_put_16 (abfd, upper, insn);
7809 bfd_put_16 (abfd, lower, insn + 2);
7810 }
7811
7812 /* Thumb code calling an ARM function. */
7813
7814 static int
7815 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7816 const char * name,
7817 bfd * input_bfd,
7818 bfd * output_bfd,
7819 asection * input_section,
7820 bfd_byte * hit_data,
7821 asection * sym_sec,
7822 bfd_vma offset,
7823 bfd_signed_vma addend,
7824 bfd_vma val,
7825 char **error_message)
7826 {
7827 asection * s = 0;
7828 bfd_vma my_offset;
7829 long int ret_offset;
7830 struct elf_link_hash_entry * myh;
7831 struct elf32_arm_link_hash_table * globals;
7832
7833 myh = find_thumb_glue (info, name, error_message);
7834 if (myh == NULL)
7835 return FALSE;
7836
7837 globals = elf32_arm_hash_table (info);
7838 BFD_ASSERT (globals != NULL);
7839 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7840
7841 my_offset = myh->root.u.def.value;
7842
7843 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7844 THUMB2ARM_GLUE_SECTION_NAME);
7845
7846 BFD_ASSERT (s != NULL);
7847 BFD_ASSERT (s->contents != NULL);
7848 BFD_ASSERT (s->output_section != NULL);
7849
7850 if ((my_offset & 0x01) == 0x01)
7851 {
7852 if (sym_sec != NULL
7853 && sym_sec->owner != NULL
7854 && !INTERWORK_FLAG (sym_sec->owner))
7855 {
7856 (*_bfd_error_handler)
7857 (_("%B(%s): warning: interworking not enabled.\n"
7858 " first occurrence: %B: Thumb call to ARM"),
7859 sym_sec->owner, input_bfd, name);
7860
7861 return FALSE;
7862 }
7863
7864 --my_offset;
7865 myh->root.u.def.value = my_offset;
7866
7867 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7868 s->contents + my_offset);
7869
7870 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7871 s->contents + my_offset + 2);
7872
7873 ret_offset =
7874 /* Address of destination of the stub. */
7875 ((bfd_signed_vma) val)
7876 - ((bfd_signed_vma)
7877 /* Offset from the start of the current section
7878 to the start of the stubs. */
7879 (s->output_offset
7880 /* Offset of the start of this stub from the start of the stubs. */
7881 + my_offset
7882 /* Address of the start of the current section. */
7883 + s->output_section->vma)
7884 /* The branch instruction is 4 bytes into the stub. */
7885 + 4
7886 /* ARM branches work from the pc of the instruction + 8. */
7887 + 8);
7888
7889 put_arm_insn (globals, output_bfd,
7890 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7891 s->contents + my_offset + 4);
7892 }
7893
7894 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7895
7896 /* Now go back and fix up the original BL insn to point to here. */
7897 ret_offset =
7898 /* Address of where the stub is located. */
7899 (s->output_section->vma + s->output_offset + my_offset)
7900 /* Address of where the BL is located. */
7901 - (input_section->output_section->vma + input_section->output_offset
7902 + offset)
7903 /* Addend in the relocation. */
7904 - addend
7905 /* Biassing for PC-relative addressing. */
7906 - 8;
7907
7908 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7909
7910 return TRUE;
7911 }
7912
7913 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7914
static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Locate the glue symbol for NAME; on failure ERROR_MESSAGE is set by
     find_arm_glue and we propagate the NULL to the caller.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit of the glue offset marks a stub whose contents have not
     been written yet; clear it and emit the stub on first use.  */
  if ((my_offset & 0x01) == 0x01)
    {
      /* Warn (once per stub) when the target object was not compiled
	 for interworking.  Note that, unlike the Thumb-to-ARM case,
	 this is not treated as a hard failure.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  /* PIC stub: ldr / add pc-relative / bx r12, followed by the
	     32-bit relative offset (with the Thumb bit set).  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* v5T stub: a single ldr plus the absolute target address.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* v4T stub: ldr / bx r12 plus the absolute target address.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  /* NOTE(review): only this branch advances my_offset before the
	     bound check below — presumably intentional, but worth
	     confirming against the other two stub layouts.  */
	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
8007
8008 /* Arm code calling a Thumb function. */
8009
8010 static int
8011 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
8012 const char * name,
8013 bfd * input_bfd,
8014 bfd * output_bfd,
8015 asection * input_section,
8016 bfd_byte * hit_data,
8017 asection * sym_sec,
8018 bfd_vma offset,
8019 bfd_signed_vma addend,
8020 bfd_vma val,
8021 char **error_message)
8022 {
8023 unsigned long int tmp;
8024 bfd_vma my_offset;
8025 asection * s;
8026 long int ret_offset;
8027 struct elf_link_hash_entry * myh;
8028 struct elf32_arm_link_hash_table * globals;
8029
8030 globals = elf32_arm_hash_table (info);
8031 BFD_ASSERT (globals != NULL);
8032 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8033
8034 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8035 ARM2THUMB_GLUE_SECTION_NAME);
8036 BFD_ASSERT (s != NULL);
8037 BFD_ASSERT (s->contents != NULL);
8038 BFD_ASSERT (s->output_section != NULL);
8039
8040 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
8041 sym_sec, val, s, error_message);
8042 if (!myh)
8043 return FALSE;
8044
8045 my_offset = myh->root.u.def.value;
8046 tmp = bfd_get_32 (input_bfd, hit_data);
8047 tmp = tmp & 0xFF000000;
8048
8049 /* Somehow these are both 4 too far, so subtract 8. */
8050 ret_offset = (s->output_offset
8051 + my_offset
8052 + s->output_section->vma
8053 - (input_section->output_offset
8054 + input_section->output_section->vma
8055 + offset + addend)
8056 - 8);
8057
8058 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
8059
8060 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
8061
8062 return TRUE;
8063 }
8064
8065 /* Populate Arm stub for an exported Thumb function. */
8066
8067 static bfd_boolean
8068 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
8069 {
8070 struct bfd_link_info * info = (struct bfd_link_info *) inf;
8071 asection * s;
8072 struct elf_link_hash_entry * myh;
8073 struct elf32_arm_link_hash_entry *eh;
8074 struct elf32_arm_link_hash_table * globals;
8075 asection *sec;
8076 bfd_vma val;
8077 char *error_message;
8078
8079 eh = elf32_arm_hash_entry (h);
8080 /* Allocate stubs for exported Thumb functions on v4t. */
8081 if (eh->export_glue == NULL)
8082 return TRUE;
8083
8084 globals = elf32_arm_hash_table (info);
8085 BFD_ASSERT (globals != NULL);
8086 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8087
8088 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8089 ARM2THUMB_GLUE_SECTION_NAME);
8090 BFD_ASSERT (s != NULL);
8091 BFD_ASSERT (s->contents != NULL);
8092 BFD_ASSERT (s->output_section != NULL);
8093
8094 sec = eh->export_glue->root.u.def.section;
8095
8096 BFD_ASSERT (sec->output_section != NULL);
8097
8098 val = eh->export_glue->root.u.def.value + sec->output_offset
8099 + sec->output_section->vma;
8100
8101 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
8102 h->root.u.def.section->owner,
8103 globals->obfd, sec, val, s,
8104 &error_message);
8105 BFD_ASSERT (myh);
8106 return TRUE;
8107 }
8108
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
8110
8111 static bfd_vma
8112 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
8113 {
8114 bfd_byte *p;
8115 bfd_vma glue_addr;
8116 asection *s;
8117 struct elf32_arm_link_hash_table *globals;
8118
8119 globals = elf32_arm_hash_table (info);
8120 BFD_ASSERT (globals != NULL);
8121 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8122
8123 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8124 ARM_BX_GLUE_SECTION_NAME);
8125 BFD_ASSERT (s != NULL);
8126 BFD_ASSERT (s->contents != NULL);
8127 BFD_ASSERT (s->output_section != NULL);
8128
8129 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8130
8131 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8132
8133 if ((globals->bx_glue_offset[reg] & 1) == 0)
8134 {
8135 p = s->contents + glue_addr;
8136 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8137 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8138 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8139 globals->bx_glue_offset[reg] |= 1;
8140 }
8141
8142 return glue_addr + s->output_section->vma + s->output_offset;
8143 }
8144
8145 /* Generate Arm stubs for exported Thumb symbols. */
8146 static void
8147 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8148 struct bfd_link_info *link_info)
8149 {
8150 struct elf32_arm_link_hash_table * globals;
8151
8152 if (link_info == NULL)
8153 /* Ignore this if we are not called by the ELF backend linker. */
8154 return;
8155
8156 globals = elf32_arm_hash_table (link_info);
8157 if (globals == NULL)
8158 return;
8159
8160 /* If blx is available then exported Thumb symbols are OK and there is
8161 nothing to do. */
8162 if (globals->use_blx)
8163 return;
8164
8165 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8166 link_info);
8167 }
8168
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */
8171
8172 static void
8173 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8174 bfd_size_type count)
8175 {
8176 struct elf32_arm_link_hash_table *htab;
8177
8178 htab = elf32_arm_hash_table (info);
8179 BFD_ASSERT (htab->root.dynamic_sections_created);
8180 if (sreloc == NULL)
8181 abort ();
8182 sreloc->size += RELOC_SIZE (htab) * count;
8183 }
8184
8185 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8186 dynamic, the relocations should go in SRELOC, otherwise they should
8187 go in the special .rel.iplt section. */
8188
8189 static void
8190 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8191 bfd_size_type count)
8192 {
8193 struct elf32_arm_link_hash_table *htab;
8194
8195 htab = elf32_arm_hash_table (info);
8196 if (!htab->root.dynamic_sections_created)
8197 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8198 else
8199 {
8200 BFD_ASSERT (sreloc != NULL);
8201 sreloc->size += RELOC_SIZE (htab) * count;
8202 }
8203 }
8204
8205 /* Add relocation REL to the end of relocation section SRELOC. */
8206
8207 static void
8208 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8209 asection *sreloc, Elf_Internal_Rela *rel)
8210 {
8211 bfd_byte *loc;
8212 struct elf32_arm_link_hash_table *htab;
8213
8214 htab = elf32_arm_hash_table (info);
8215 if (!htab->root.dynamic_sections_created
8216 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8217 sreloc = htab->root.irelplt;
8218 if (sreloc == NULL)
8219 abort ();
8220 loc = sreloc->contents;
8221 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8222 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8223 abort ();
8224 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8225 }
8226
8227 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8228 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8229 to .plt. */
8230
8231 static void
8232 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
8233 bfd_boolean is_iplt_entry,
8234 union gotplt_union *root_plt,
8235 struct arm_plt_info *arm_plt)
8236 {
8237 struct elf32_arm_link_hash_table *htab;
8238 asection *splt;
8239 asection *sgotplt;
8240
8241 htab = elf32_arm_hash_table (info);
8242
8243 if (is_iplt_entry)
8244 {
8245 splt = htab->root.iplt;
8246 sgotplt = htab->root.igotplt;
8247
8248 /* NaCl uses a special first entry in .iplt too. */
8249 if (htab->nacl_p && splt->size == 0)
8250 splt->size += htab->plt_header_size;
8251
8252 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8253 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
8254 }
8255 else
8256 {
8257 splt = htab->root.splt;
8258 sgotplt = htab->root.sgotplt;
8259
8260 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8261 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
8262
8263 /* If this is the first .plt entry, make room for the special
8264 first entry. */
8265 if (splt->size == 0)
8266 splt->size += htab->plt_header_size;
8267
8268 htab->next_tls_desc_index++;
8269 }
8270
8271 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8272 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8273 splt->size += PLT_THUMB_STUB_SIZE;
8274 root_plt->offset = splt->size;
8275 splt->size += htab->plt_entry_size;
8276
8277 if (!htab->symbian_p)
8278 {
8279 /* We also need to make an entry in the .got.plt section, which
8280 will be placed in the .got section by the linker script. */
8281 if (is_iplt_entry)
8282 arm_plt->got_offset = sgotplt->size;
8283 else
8284 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
8285 sgotplt->size += 4;
8286 }
8287 }
8288
8289 static bfd_vma
8290 arm_movw_immediate (bfd_vma value)
8291 {
8292 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8293 }
8294
8295 static bfd_vma
8296 arm_movt_immediate (bfd_vma value)
8297 {
8298 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8299 }
8300
8301 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8302 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8303 Otherwise, DYNINDX is the index of the symbol in the dynamic
8304 symbol table and SYM_VALUE is undefined.
8305
8306 ROOT_PLT points to the offset of the PLT entry from the start of its
8307 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8308 bookkeeping information.
8309
8310 Returns FALSE if there was a problem. */
8311
static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      /* STT_GNU_IFUNC entry: use the .iplt/.igot.plt/.rel.iplt trio.  */
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      /* Symbian entries are two words: an instruction followed by a
	 data word that gets a run-time R_ARM_GLOB_DAT relocation.  */
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  NOTE(review): the low bit of
	 got_offset appears to be used as a flag elsewhere; masking
	 with -2 recovers the even byte offset.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  /* VxWorks shared-object PLT entry.  Words 2 and 5 of the
	     template are data (GOT-relative offset and .rel.plt byte
	     offset); the rest are instructions.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  /* VxWorks executable PLT entry.  Word 2 holds the absolute
	     GOT entry address, word 4 a backwards branch, word 5 the
	     .rel.plt byte offset.  */
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit in the 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  /* movw/movt to materialise the GOT displacement, then the
	     remaining template words, the last taking the branch
	     displacement to the common tail.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  The bit-shuffling below scatters
	     the displacement into the Thumb-2 movw/movt immediate fields.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  /* If ARM code must enter this entry from Thumb, prepend a
	     two-halfword interworking stub immediately before it.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* The short form only reaches displacements whose top
		 nibble is zero.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      /* Long form: the displacement is split over four
		 instructions, so any 32-bit value is reachable.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);
    }

  /* IFUNC relocations are appended to .rel.iplt as they are created;
     ordinary PLT relocations are stored at their fixed PLT_INDEX slot.  */
  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  return TRUE;
}
8632
8633 /* Some relocations map to different relocations depending on the
8634 target. Return the real relocation. */
8635
8636 static int
8637 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8638 int r_type)
8639 {
8640 switch (r_type)
8641 {
8642 case R_ARM_TARGET1:
8643 if (globals->target1_is_rel)
8644 return R_ARM_REL32;
8645 else
8646 return R_ARM_ABS32;
8647
8648 case R_ARM_TARGET2:
8649 return globals->target2_reloc;
8650
8651 default:
8652 return r_type;
8653 }
8654 }
8655
8656 /* Return the base VMA address which should be subtracted from real addresses
8657 when resolving @dtpoff relocation.
8658 This is PT_TLS segment p_vaddr. */
8659
8660 static bfd_vma
8661 dtpoff_base (struct bfd_link_info *info)
8662 {
8663 /* If tls_sec is NULL, we should have signalled an error already. */
8664 if (elf_hash_table (info)->tls_sec == NULL)
8665 return 0;
8666 return elf_hash_table (info)->tls_sec->vma;
8667 }
8668
8669 /* Return the relocation value for @tpoff relocation
8670 if STT_TLS virtual address is ADDRESS. */
8671
8672 static bfd_vma
8673 tpoff (struct bfd_link_info *info, bfd_vma address)
8674 {
8675 struct elf_link_hash_table *htab = elf_hash_table (info);
8676 bfd_vma base;
8677
8678 /* If tls_sec is NULL, we should have signalled an error already. */
8679 if (htab->tls_sec == NULL)
8680 return 0;
8681 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8682 return address - htab->tls_sec->vma + base;
8683 }
8684
8685 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8686 VALUE is the relocation value. */
8687
8688 static bfd_reloc_status_type
8689 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8690 {
8691 if (value > 0xfff)
8692 return bfd_reloc_overflow;
8693
8694 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8695 bfd_put_32 (abfd, value, data);
8696 return bfd_reloc_ok;
8697 }
8698
8699 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8700 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8701 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8702
8703 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8704 is to then call final_link_relocate. Return other values in the
8705 case of error.
8706
8707 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8708 the pre-relaxed code. It would be nice if the relocs were updated
8709 to match the optimization. */
8710
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the in-place addend word.  NOTE(review): the low bit
	 of the word appears to distinguish a Thumb (-5) from an ARM
	 (-8) PC bias — confirm against the descriptor-sequence
	 emitters before relying on this.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* The caller still has to apply the final relocation.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Patch one instruction of the TLS descriptor
	 call sequence, matched by its opcode pattern.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same idea as the Thumb case above, with 32-bit
	 ARM encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (arch_has_thumb2_nop (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      /* INSN holds two 16-bit halfwords; the high half is stored
	 first.  */
      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  /* The relaxation is complete; the caller need not relocate further.  */
  return bfd_reloc_ok;
}
8843
8844 /* For a given value of n, calculate the value of G_n as required to
8845 deal with group relocations. We return it in the form of an
8846 encoded constant-and-rotation, together with the final residual. If n is
8847 specified as less than zero, then final_residual is filled with the
8848 input value and no further action is performed. */
8849
8850 static bfd_vma
8851 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
8852 {
8853 int current_n;
8854 bfd_vma g_n;
8855 bfd_vma encoded_g_n = 0;
8856 bfd_vma residual = value; /* Also known as Y_n. */
8857
8858 for (current_n = 0; current_n <= n; current_n++)
8859 {
8860 int shift;
8861
8862 /* Calculate which part of the value to mask. */
8863 if (residual == 0)
8864 shift = 0;
8865 else
8866 {
8867 int msb;
8868
8869 /* Determine the most significant bit in the residual and
8870 align the resulting value to a 2-bit boundary. */
8871 for (msb = 30; msb >= 0; msb -= 2)
8872 if (residual & (3 << msb))
8873 break;
8874
8875 /* The desired shift is now (msb - 6), or zero, whichever
8876 is the greater. */
8877 shift = msb - 6;
8878 if (shift < 0)
8879 shift = 0;
8880 }
8881
8882 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8883 g_n = residual & (0xff << shift);
8884 encoded_g_n = (g_n >> shift)
8885 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
8886
8887 /* Calculate the residual for the next time around. */
8888 residual &= ~g_n;
8889 }
8890
8891 *final_residual = residual;
8892
8893 return encoded_g_n;
8894 }
8895
8896 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8897 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8898
8899 static int
8900 identify_add_or_sub (bfd_vma insn)
8901 {
8902 int opcode = insn & 0x1e00000;
8903
8904 if (opcode == 1 << 23) /* ADD */
8905 return 1;
8906
8907 if (opcode == 1 << 22) /* SUB */
8908 return -1;
8909
8910 return 0;
8911 }
8912
8913 /* Perform a relocation as part of a final link. */
8914
8915 static bfd_reloc_status_type
8916 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8917 bfd * input_bfd,
8918 bfd * output_bfd,
8919 asection * input_section,
8920 bfd_byte * contents,
8921 Elf_Internal_Rela * rel,
8922 bfd_vma value,
8923 struct bfd_link_info * info,
8924 asection * sym_sec,
8925 const char * sym_name,
8926 unsigned char st_type,
8927 enum arm_st_branch_type branch_type,
8928 struct elf_link_hash_entry * h,
8929 bfd_boolean * unresolved_reloc_p,
8930 char ** error_message)
8931 {
8932 unsigned long r_type = howto->type;
8933 unsigned long r_symndx;
8934 bfd_byte * hit_data = contents + rel->r_offset;
8935 bfd_vma * local_got_offsets;
8936 bfd_vma * local_tlsdesc_gotents;
8937 asection * sgot;
8938 asection * splt;
8939 asection * sreloc = NULL;
8940 asection * srelgot;
8941 bfd_vma addend;
8942 bfd_signed_vma signed_addend;
8943 unsigned char dynreloc_st_type;
8944 bfd_vma dynreloc_value;
8945 struct elf32_arm_link_hash_table * globals;
8946 struct elf32_arm_link_hash_entry *eh;
8947 union gotplt_union *root_plt;
8948 struct arm_plt_info *arm_plt;
8949 bfd_vma plt_offset;
8950 bfd_vma gotplt_offset;
8951 bfd_boolean has_iplt_entry;
8952
8953 globals = elf32_arm_hash_table (info);
8954 if (globals == NULL)
8955 return bfd_reloc_notsupported;
8956
8957 BFD_ASSERT (is_arm_elf (input_bfd));
8958
8959 /* Some relocation types map to different relocations depending on the
8960 target. We pick the right one here. */
8961 r_type = arm_real_reloc_type (globals, r_type);
8962
8963 /* It is possible to have linker relaxations on some TLS access
8964 models. Update our information here. */
8965 r_type = elf32_arm_tls_transition (info, r_type, h);
8966
8967 if (r_type != howto->type)
8968 howto = elf32_arm_howto_from_type (r_type);
8969
8970 eh = (struct elf32_arm_link_hash_entry *) h;
8971 sgot = globals->root.sgot;
8972 local_got_offsets = elf_local_got_offsets (input_bfd);
8973 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8974
8975 if (globals->root.dynamic_sections_created)
8976 srelgot = globals->root.srelgot;
8977 else
8978 srelgot = NULL;
8979
8980 r_symndx = ELF32_R_SYM (rel->r_info);
8981
8982 if (globals->use_rel)
8983 {
8984 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8985
8986 if (addend & ((howto->src_mask + 1) >> 1))
8987 {
8988 signed_addend = -1;
8989 signed_addend &= ~ howto->src_mask;
8990 signed_addend |= addend;
8991 }
8992 else
8993 signed_addend = addend;
8994 }
8995 else
8996 addend = signed_addend = rel->r_addend;
8997
8998 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
8999 are resolving a function call relocation. */
9000 if (using_thumb_only (globals)
9001 && (r_type == R_ARM_THM_CALL
9002 || r_type == R_ARM_THM_JUMP24)
9003 && branch_type == ST_BRANCH_TO_ARM)
9004 branch_type = ST_BRANCH_TO_THUMB;
9005
9006 /* Record the symbol information that should be used in dynamic
9007 relocations. */
9008 dynreloc_st_type = st_type;
9009 dynreloc_value = value;
9010 if (branch_type == ST_BRANCH_TO_THUMB)
9011 dynreloc_value |= 1;
9012
9013 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9014 VALUE appropriately for relocations that we resolve at link time. */
9015 has_iplt_entry = FALSE;
9016 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9017 && root_plt->offset != (bfd_vma) -1)
9018 {
9019 plt_offset = root_plt->offset;
9020 gotplt_offset = arm_plt->got_offset;
9021
9022 if (h == NULL || eh->is_iplt)
9023 {
9024 has_iplt_entry = TRUE;
9025 splt = globals->root.iplt;
9026
9027 /* Populate .iplt entries here, because not all of them will
9028 be seen by finish_dynamic_symbol. The lower bit is set if
9029 we have already populated the entry. */
9030 if (plt_offset & 1)
9031 plt_offset--;
9032 else
9033 {
9034 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9035 -1, dynreloc_value))
9036 root_plt->offset |= 1;
9037 else
9038 return bfd_reloc_notsupported;
9039 }
9040
9041 /* Static relocations always resolve to the .iplt entry. */
9042 st_type = STT_FUNC;
9043 value = (splt->output_section->vma
9044 + splt->output_offset
9045 + plt_offset);
9046 branch_type = ST_BRANCH_TO_ARM;
9047
9048 /* If there are non-call relocations that resolve to the .iplt
9049 entry, then all dynamic ones must too. */
9050 if (arm_plt->noncall_refcount != 0)
9051 {
9052 dynreloc_st_type = st_type;
9053 dynreloc_value = value;
9054 }
9055 }
9056 else
9057 /* We populate the .plt entry in finish_dynamic_symbol. */
9058 splt = globals->root.splt;
9059 }
9060 else
9061 {
9062 splt = NULL;
9063 plt_offset = (bfd_vma) -1;
9064 gotplt_offset = (bfd_vma) -1;
9065 }
9066
9067 switch (r_type)
9068 {
9069 case R_ARM_NONE:
9070 /* We don't need to find a value for this symbol. It's just a
9071 marker. */
9072 *unresolved_reloc_p = FALSE;
9073 return bfd_reloc_ok;
9074
9075 case R_ARM_ABS12:
9076 if (!globals->vxworks_p)
9077 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9078
9079 case R_ARM_PC24:
9080 case R_ARM_ABS32:
9081 case R_ARM_ABS32_NOI:
9082 case R_ARM_REL32:
9083 case R_ARM_REL32_NOI:
9084 case R_ARM_CALL:
9085 case R_ARM_JUMP24:
9086 case R_ARM_XPC25:
9087 case R_ARM_PREL31:
9088 case R_ARM_PLT32:
9089 /* Handle relocations which should use the PLT entry. ABS32/REL32
9090 will use the symbol's value, which may point to a PLT entry, but we
9091 don't need to handle that here. If we created a PLT entry, all
9092 branches in this object should go to it, except if the PLT is too
9093 far away, in which case a long branch stub should be inserted. */
9094 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9095 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9096 && r_type != R_ARM_CALL
9097 && r_type != R_ARM_JUMP24
9098 && r_type != R_ARM_PLT32)
9099 && plt_offset != (bfd_vma) -1)
9100 {
9101 /* If we've created a .plt section, and assigned a PLT entry
9102 to this function, it must either be a STT_GNU_IFUNC reference
9103 or not be known to bind locally. In other cases, we should
9104 have cleared the PLT entry by now. */
9105 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9106
9107 value = (splt->output_section->vma
9108 + splt->output_offset
9109 + plt_offset);
9110 *unresolved_reloc_p = FALSE;
9111 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9112 contents, rel->r_offset, value,
9113 rel->r_addend);
9114 }
9115
9116 /* When generating a shared object or relocatable executable, these
9117 relocations are copied into the output file to be resolved at
9118 run time. */
9119 if ((bfd_link_pic (info)
9120 || globals->root.is_relocatable_executable)
9121 && (input_section->flags & SEC_ALLOC)
9122 && !(globals->vxworks_p
9123 && strcmp (input_section->output_section->name,
9124 ".tls_vars") == 0)
9125 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9126 || !SYMBOL_CALLS_LOCAL (info, h))
9127 && !(input_bfd == globals->stub_bfd
9128 && strstr (input_section->name, STUB_SUFFIX))
9129 && (h == NULL
9130 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9131 || h->root.type != bfd_link_hash_undefweak)
9132 && r_type != R_ARM_PC24
9133 && r_type != R_ARM_CALL
9134 && r_type != R_ARM_JUMP24
9135 && r_type != R_ARM_PREL31
9136 && r_type != R_ARM_PLT32)
9137 {
9138 Elf_Internal_Rela outrel;
9139 bfd_boolean skip, relocate;
9140
9141 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9142 && !h->def_regular)
9143 {
9144 char *v = _("shared object");
9145
9146 if (bfd_link_executable (info))
9147 v = _("PIE executable");
9148
9149 (*_bfd_error_handler)
9150 (_("%B: relocation %s against external or undefined symbol `%s'"
9151 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9152 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9153 return bfd_reloc_notsupported;
9154 }
9155
9156 *unresolved_reloc_p = FALSE;
9157
9158 if (sreloc == NULL && globals->root.dynamic_sections_created)
9159 {
9160 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9161 ! globals->use_rel);
9162
9163 if (sreloc == NULL)
9164 return bfd_reloc_notsupported;
9165 }
9166
9167 skip = FALSE;
9168 relocate = FALSE;
9169
9170 outrel.r_addend = addend;
9171 outrel.r_offset =
9172 _bfd_elf_section_offset (output_bfd, info, input_section,
9173 rel->r_offset);
9174 if (outrel.r_offset == (bfd_vma) -1)
9175 skip = TRUE;
9176 else if (outrel.r_offset == (bfd_vma) -2)
9177 skip = TRUE, relocate = TRUE;
9178 outrel.r_offset += (input_section->output_section->vma
9179 + input_section->output_offset);
9180
9181 if (skip)
9182 memset (&outrel, 0, sizeof outrel);
9183 else if (h != NULL
9184 && h->dynindx != -1
9185 && (!bfd_link_pic (info)
9186 || !SYMBOLIC_BIND (info, h)
9187 || !h->def_regular))
9188 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9189 else
9190 {
9191 int symbol;
9192
9193 /* This symbol is local, or marked to become local. */
9194 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9195 if (globals->symbian_p)
9196 {
9197 asection *osec;
9198
9199 /* On Symbian OS, the data segment and text segment
9200 can be relocated independently. Therefore, we
9201 must indicate the segment to which this
9202 relocation is relative. The BPABI allows us to
9203 use any symbol in the right segment; we just use
9204 the section symbol as it is convenient. (We
9205 cannot use the symbol given by "h" directly as it
9206 will not appear in the dynamic symbol table.)
9207
9208 Note that the dynamic linker ignores the section
9209 symbol value, so we don't subtract osec->vma
9210 from the emitted reloc addend. */
9211 if (sym_sec)
9212 osec = sym_sec->output_section;
9213 else
9214 osec = input_section->output_section;
9215 symbol = elf_section_data (osec)->dynindx;
9216 if (symbol == 0)
9217 {
9218 struct elf_link_hash_table *htab = elf_hash_table (info);
9219
9220 if ((osec->flags & SEC_READONLY) == 0
9221 && htab->data_index_section != NULL)
9222 osec = htab->data_index_section;
9223 else
9224 osec = htab->text_index_section;
9225 symbol = elf_section_data (osec)->dynindx;
9226 }
9227 BFD_ASSERT (symbol != 0);
9228 }
9229 else
9230 /* On SVR4-ish systems, the dynamic loader cannot
9231 relocate the text and data segments independently,
9232 so the symbol does not matter. */
9233 symbol = 0;
9234 if (dynreloc_st_type == STT_GNU_IFUNC)
9235 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9236 to the .iplt entry. Instead, every non-call reference
9237 must use an R_ARM_IRELATIVE relocation to obtain the
9238 correct run-time address. */
9239 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9240 else
9241 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9242 if (globals->use_rel)
9243 relocate = TRUE;
9244 else
9245 outrel.r_addend += dynreloc_value;
9246 }
9247
9248 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9249
9250 /* If this reloc is against an external symbol, we do not want to
9251 fiddle with the addend. Otherwise, we need to include the symbol
9252 value so that it becomes an addend for the dynamic reloc. */
9253 if (! relocate)
9254 return bfd_reloc_ok;
9255
9256 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9257 contents, rel->r_offset,
9258 dynreloc_value, (bfd_vma) 0);
9259 }
9260 else switch (r_type)
9261 {
9262 case R_ARM_ABS12:
9263 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9264
9265 case R_ARM_XPC25: /* Arm BLX instruction. */
9266 case R_ARM_CALL:
9267 case R_ARM_JUMP24:
9268 case R_ARM_PC24: /* Arm B/BL instruction. */
9269 case R_ARM_PLT32:
9270 {
9271 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9272
9273 if (r_type == R_ARM_XPC25)
9274 {
9275 /* Check for Arm calling Arm function. */
9276 /* FIXME: Should we translate the instruction into a BL
9277 instruction instead ? */
9278 if (branch_type != ST_BRANCH_TO_THUMB)
9279 (*_bfd_error_handler)
9280 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9281 input_bfd,
9282 h ? h->root.root.string : "(local)");
9283 }
9284 else if (r_type == R_ARM_PC24)
9285 {
9286 /* Check for Arm calling Thumb function. */
9287 if (branch_type == ST_BRANCH_TO_THUMB)
9288 {
9289 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9290 output_bfd, input_section,
9291 hit_data, sym_sec, rel->r_offset,
9292 signed_addend, value,
9293 error_message))
9294 return bfd_reloc_ok;
9295 else
9296 return bfd_reloc_dangerous;
9297 }
9298 }
9299
9300 /* Check if a stub has to be inserted because the
9301 destination is too far or we are changing mode. */
9302 if ( r_type == R_ARM_CALL
9303 || r_type == R_ARM_JUMP24
9304 || r_type == R_ARM_PLT32)
9305 {
9306 enum elf32_arm_stub_type stub_type = arm_stub_none;
9307 struct elf32_arm_link_hash_entry *hash;
9308
9309 hash = (struct elf32_arm_link_hash_entry *) h;
9310 stub_type = arm_type_of_stub (info, input_section, rel,
9311 st_type, &branch_type,
9312 hash, value, sym_sec,
9313 input_bfd, sym_name);
9314
9315 if (stub_type != arm_stub_none)
9316 {
9317 /* The target is out of reach, so redirect the
9318 branch to the local stub for this function. */
9319 stub_entry = elf32_arm_get_stub_entry (input_section,
9320 sym_sec, h,
9321 rel, globals,
9322 stub_type);
9323 {
9324 if (stub_entry != NULL)
9325 value = (stub_entry->stub_offset
9326 + stub_entry->stub_sec->output_offset
9327 + stub_entry->stub_sec->output_section->vma);
9328
9329 if (plt_offset != (bfd_vma) -1)
9330 *unresolved_reloc_p = FALSE;
9331 }
9332 }
9333 else
9334 {
9335 /* If the call goes through a PLT entry, make sure to
9336 check distance to the right destination address. */
9337 if (plt_offset != (bfd_vma) -1)
9338 {
9339 value = (splt->output_section->vma
9340 + splt->output_offset
9341 + plt_offset);
9342 *unresolved_reloc_p = FALSE;
9343 /* The PLT entry is in ARM mode, regardless of the
9344 target function. */
9345 branch_type = ST_BRANCH_TO_ARM;
9346 }
9347 }
9348 }
9349
9350 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9351 where:
9352 S is the address of the symbol in the relocation.
9353 P is address of the instruction being relocated.
9354 A is the addend (extracted from the instruction) in bytes.
9355
9356 S is held in 'value'.
9357 P is the base address of the section containing the
9358 instruction plus the offset of the reloc into that
9359 section, ie:
9360 (input_section->output_section->vma +
9361 input_section->output_offset +
9362 rel->r_offset).
9363 A is the addend, converted into bytes, ie:
9364 (signed_addend * 4)
9365
9366 Note: None of these operations have knowledge of the pipeline
9367 size of the processor, thus it is up to the assembler to
9368 encode this information into the addend. */
9369 value -= (input_section->output_section->vma
9370 + input_section->output_offset);
9371 value -= rel->r_offset;
9372 if (globals->use_rel)
9373 value += (signed_addend << howto->size);
9374 else
9375 /* RELA addends do not have to be adjusted by howto->size. */
9376 value += signed_addend;
9377
9378 signed_addend = value;
9379 signed_addend >>= howto->rightshift;
9380
9381 /* A branch to an undefined weak symbol is turned into a jump to
9382 the next instruction unless a PLT entry will be created.
9383 Do the same for local undefined symbols (but not for STN_UNDEF).
9384 The jump to the next instruction is optimized as a NOP depending
9385 on the architecture. */
9386 if (h ? (h->root.type == bfd_link_hash_undefweak
9387 && plt_offset == (bfd_vma) -1)
9388 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9389 {
9390 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9391
9392 if (arch_has_arm_nop (globals))
9393 value |= 0x0320f000;
9394 else
9395 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9396 }
9397 else
9398 {
9399 /* Perform a signed range check. */
9400 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9401 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9402 return bfd_reloc_overflow;
9403
9404 addend = (value & 2);
9405
9406 value = (signed_addend & howto->dst_mask)
9407 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9408
9409 if (r_type == R_ARM_CALL)
9410 {
9411 /* Set the H bit in the BLX instruction. */
9412 if (branch_type == ST_BRANCH_TO_THUMB)
9413 {
9414 if (addend)
9415 value |= (1 << 24);
9416 else
9417 value &= ~(bfd_vma)(1 << 24);
9418 }
9419
9420 /* Select the correct instruction (BL or BLX). */
9421 /* Only if we are not handling a BL to a stub. In this
9422 case, mode switching is performed by the stub. */
9423 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9424 value |= (1 << 28);
9425 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9426 {
9427 value &= ~(bfd_vma)(1 << 28);
9428 value |= (1 << 24);
9429 }
9430 }
9431 }
9432 }
9433 break;
9434
9435 case R_ARM_ABS32:
9436 value += addend;
9437 if (branch_type == ST_BRANCH_TO_THUMB)
9438 value |= 1;
9439 break;
9440
9441 case R_ARM_ABS32_NOI:
9442 value += addend;
9443 break;
9444
9445 case R_ARM_REL32:
9446 value += addend;
9447 if (branch_type == ST_BRANCH_TO_THUMB)
9448 value |= 1;
9449 value -= (input_section->output_section->vma
9450 + input_section->output_offset + rel->r_offset);
9451 break;
9452
9453 case R_ARM_REL32_NOI:
9454 value += addend;
9455 value -= (input_section->output_section->vma
9456 + input_section->output_offset + rel->r_offset);
9457 break;
9458
9459 case R_ARM_PREL31:
9460 value -= (input_section->output_section->vma
9461 + input_section->output_offset + rel->r_offset);
9462 value += signed_addend;
9463 if (! h || h->root.type != bfd_link_hash_undefweak)
9464 {
9465 /* Check for overflow. */
9466 if ((value ^ (value >> 1)) & (1 << 30))
9467 return bfd_reloc_overflow;
9468 }
9469 value &= 0x7fffffff;
9470 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9471 if (branch_type == ST_BRANCH_TO_THUMB)
9472 value |= 1;
9473 break;
9474 }
9475
9476 bfd_put_32 (input_bfd, value, hit_data);
9477 return bfd_reloc_ok;
9478
9479 case R_ARM_ABS8:
9480 /* PR 16202: Refetch the addend using the correct size. */
9481 if (globals->use_rel)
9482 addend = bfd_get_8 (input_bfd, hit_data);
9483 value += addend;
9484
9485 /* There is no way to tell whether the user intended to use a signed or
9486 unsigned addend. When checking for overflow we accept either,
9487 as specified by the AAELF. */
9488 if ((long) value > 0xff || (long) value < -0x80)
9489 return bfd_reloc_overflow;
9490
9491 bfd_put_8 (input_bfd, value, hit_data);
9492 return bfd_reloc_ok;
9493
9494 case R_ARM_ABS16:
9495 /* PR 16202: Refetch the addend using the correct size. */
9496 if (globals->use_rel)
9497 addend = bfd_get_16 (input_bfd, hit_data);
9498 value += addend;
9499
9500 /* See comment for R_ARM_ABS8. */
9501 if ((long) value > 0xffff || (long) value < -0x8000)
9502 return bfd_reloc_overflow;
9503
9504 bfd_put_16 (input_bfd, value, hit_data);
9505 return bfd_reloc_ok;
9506
9507 case R_ARM_THM_ABS5:
9508 /* Support ldr and str instructions for the thumb. */
9509 if (globals->use_rel)
9510 {
9511 /* Need to refetch addend. */
9512 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9513 /* ??? Need to determine shift amount from operand size. */
9514 addend >>= howto->rightshift;
9515 }
9516 value += addend;
9517
9518 /* ??? Isn't value unsigned? */
9519 if ((long) value > 0x1f || (long) value < -0x10)
9520 return bfd_reloc_overflow;
9521
9522 /* ??? Value needs to be properly shifted into place first. */
9523 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9524 bfd_put_16 (input_bfd, value, hit_data);
9525 return bfd_reloc_ok;
9526
9527 case R_ARM_THM_ALU_PREL_11_0:
9528 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9529 {
9530 bfd_vma insn;
9531 bfd_signed_vma relocation;
9532
9533 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9534 | bfd_get_16 (input_bfd, hit_data + 2);
9535
9536 if (globals->use_rel)
9537 {
9538 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9539 | ((insn & (1 << 26)) >> 15);
9540 if (insn & 0xf00000)
9541 signed_addend = -signed_addend;
9542 }
9543
9544 relocation = value + signed_addend;
9545 relocation -= Pa (input_section->output_section->vma
9546 + input_section->output_offset
9547 + rel->r_offset);
9548
9549 value = relocation;
9550
9551 if (value >= 0x1000)
9552 return bfd_reloc_overflow;
9553
9554 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9555 | ((value & 0x700) << 4)
9556 | ((value & 0x800) << 15);
9557 if (relocation < 0)
9558 insn |= 0xa00000;
9559
9560 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9561 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9562
9563 return bfd_reloc_ok;
9564 }
9565
9566 case R_ARM_THM_PC8:
9567 /* PR 10073: This reloc is not generated by the GNU toolchain,
9568 but it is supported for compatibility with third party libraries
9569 generated by other compilers, specifically the ARM/IAR. */
9570 {
9571 bfd_vma insn;
9572 bfd_signed_vma relocation;
9573
9574 insn = bfd_get_16 (input_bfd, hit_data);
9575
9576 if (globals->use_rel)
9577 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9578
9579 relocation = value + addend;
9580 relocation -= Pa (input_section->output_section->vma
9581 + input_section->output_offset
9582 + rel->r_offset);
9583
9584 value = relocation;
9585
9586 /* We do not check for overflow of this reloc. Although strictly
9587 speaking this is incorrect, it appears to be necessary in order
9588 to work with IAR generated relocs. Since GCC and GAS do not
9589 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9590 a problem for them. */
9591 value &= 0x3fc;
9592
9593 insn = (insn & 0xff00) | (value >> 2);
9594
9595 bfd_put_16 (input_bfd, insn, hit_data);
9596
9597 return bfd_reloc_ok;
9598 }
9599
9600 case R_ARM_THM_PC12:
9601 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9602 {
9603 bfd_vma insn;
9604 bfd_signed_vma relocation;
9605
9606 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9607 | bfd_get_16 (input_bfd, hit_data + 2);
9608
9609 if (globals->use_rel)
9610 {
9611 signed_addend = insn & 0xfff;
9612 if (!(insn & (1 << 23)))
9613 signed_addend = -signed_addend;
9614 }
9615
9616 relocation = value + signed_addend;
9617 relocation -= Pa (input_section->output_section->vma
9618 + input_section->output_offset
9619 + rel->r_offset);
9620
9621 value = relocation;
9622
9623 if (value >= 0x1000)
9624 return bfd_reloc_overflow;
9625
9626 insn = (insn & 0xff7ff000) | value;
9627 if (relocation >= 0)
9628 insn |= (1 << 23);
9629
9630 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9631 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9632
9633 return bfd_reloc_ok;
9634 }
9635
9636 case R_ARM_THM_XPC22:
9637 case R_ARM_THM_CALL:
9638 case R_ARM_THM_JUMP24:
9639 /* Thumb BL (branch long instruction). */
9640 {
9641 bfd_vma relocation;
9642 bfd_vma reloc_sign;
9643 bfd_boolean overflow = FALSE;
9644 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9645 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9646 bfd_signed_vma reloc_signed_max;
9647 bfd_signed_vma reloc_signed_min;
9648 bfd_vma check;
9649 bfd_signed_vma signed_check;
9650 int bitsize;
9651 const int thumb2 = using_thumb2 (globals);
9652
9653 /* A branch to an undefined weak symbol is turned into a jump to
9654 the next instruction unless a PLT entry will be created.
9655 The jump to the next instruction is optimized as a NOP.W for
9656 Thumb-2 enabled architectures. */
9657 if (h && h->root.type == bfd_link_hash_undefweak
9658 && plt_offset == (bfd_vma) -1)
9659 {
9660 if (arch_has_thumb2_nop (globals))
9661 {
9662 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9663 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9664 }
9665 else
9666 {
9667 bfd_put_16 (input_bfd, 0xe000, hit_data);
9668 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9669 }
9670 return bfd_reloc_ok;
9671 }
9672
9673 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9674 with Thumb-1) involving the J1 and J2 bits. */
9675 if (globals->use_rel)
9676 {
9677 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9678 bfd_vma upper = upper_insn & 0x3ff;
9679 bfd_vma lower = lower_insn & 0x7ff;
9680 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9681 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9682 bfd_vma i1 = j1 ^ s ? 0 : 1;
9683 bfd_vma i2 = j2 ^ s ? 0 : 1;
9684
9685 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9686 /* Sign extend. */
9687 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9688
9689 signed_addend = addend;
9690 }
9691
9692 if (r_type == R_ARM_THM_XPC22)
9693 {
9694 /* Check for Thumb to Thumb call. */
9695 /* FIXME: Should we translate the instruction into a BL
9696 instruction instead ? */
9697 if (branch_type == ST_BRANCH_TO_THUMB)
9698 (*_bfd_error_handler)
9699 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9700 input_bfd,
9701 h ? h->root.root.string : "(local)");
9702 }
9703 else
9704 {
9705 /* If it is not a call to Thumb, assume call to Arm.
9706 If it is a call relative to a section name, then it is not a
9707 function call at all, but rather a long jump. Calls through
9708 the PLT do not require stubs. */
9709 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9710 {
9711 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9712 {
9713 /* Convert BL to BLX. */
9714 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9715 }
9716 else if (( r_type != R_ARM_THM_CALL)
9717 && (r_type != R_ARM_THM_JUMP24))
9718 {
9719 if (elf32_thumb_to_arm_stub
9720 (info, sym_name, input_bfd, output_bfd, input_section,
9721 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9722 error_message))
9723 return bfd_reloc_ok;
9724 else
9725 return bfd_reloc_dangerous;
9726 }
9727 }
9728 else if (branch_type == ST_BRANCH_TO_THUMB
9729 && globals->use_blx
9730 && r_type == R_ARM_THM_CALL)
9731 {
9732 /* Make sure this is a BL. */
9733 lower_insn |= 0x1800;
9734 }
9735 }
9736
9737 enum elf32_arm_stub_type stub_type = arm_stub_none;
9738 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9739 {
9740 /* Check if a stub has to be inserted because the destination
9741 is too far. */
9742 struct elf32_arm_stub_hash_entry *stub_entry;
9743 struct elf32_arm_link_hash_entry *hash;
9744
9745 hash = (struct elf32_arm_link_hash_entry *) h;
9746
9747 stub_type = arm_type_of_stub (info, input_section, rel,
9748 st_type, &branch_type,
9749 hash, value, sym_sec,
9750 input_bfd, sym_name);
9751
9752 if (stub_type != arm_stub_none)
9753 {
9754 /* The target is out of reach or we are changing modes, so
9755 redirect the branch to the local stub for this
9756 function. */
9757 stub_entry = elf32_arm_get_stub_entry (input_section,
9758 sym_sec, h,
9759 rel, globals,
9760 stub_type);
9761 if (stub_entry != NULL)
9762 {
9763 value = (stub_entry->stub_offset
9764 + stub_entry->stub_sec->output_offset
9765 + stub_entry->stub_sec->output_section->vma);
9766
9767 if (plt_offset != (bfd_vma) -1)
9768 *unresolved_reloc_p = FALSE;
9769 }
9770
9771 /* If this call becomes a call to Arm, force BLX. */
9772 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9773 {
9774 if ((stub_entry
9775 && !arm_stub_is_thumb (stub_entry->stub_type))
9776 || branch_type != ST_BRANCH_TO_THUMB)
9777 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9778 }
9779 }
9780 }
9781
9782 /* Handle calls via the PLT. */
9783 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9784 {
9785 value = (splt->output_section->vma
9786 + splt->output_offset
9787 + plt_offset);
9788
9789 if (globals->use_blx
9790 && r_type == R_ARM_THM_CALL
9791 && ! using_thumb_only (globals))
9792 {
9793 /* If the Thumb BLX instruction is available, convert
9794 the BL to a BLX instruction to call the ARM-mode
9795 PLT entry. */
9796 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9797 branch_type = ST_BRANCH_TO_ARM;
9798 }
9799 else
9800 {
9801 if (! using_thumb_only (globals))
9802 /* Target the Thumb stub before the ARM PLT entry. */
9803 value -= PLT_THUMB_STUB_SIZE;
9804 branch_type = ST_BRANCH_TO_THUMB;
9805 }
9806 *unresolved_reloc_p = FALSE;
9807 }
9808
9809 relocation = value + signed_addend;
9810
9811 relocation -= (input_section->output_section->vma
9812 + input_section->output_offset
9813 + rel->r_offset);
9814
9815 check = relocation >> howto->rightshift;
9816
9817 /* If this is a signed value, the rightshift just dropped
9818 leading 1 bits (assuming twos complement). */
9819 if ((bfd_signed_vma) relocation >= 0)
9820 signed_check = check;
9821 else
9822 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9823
9824 /* Calculate the permissible maximum and minimum values for
9825 this relocation according to whether we're relocating for
9826 Thumb-2 or not. */
9827 bitsize = howto->bitsize;
9828 if (!thumb2)
9829 bitsize -= 2;
9830 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9831 reloc_signed_min = ~reloc_signed_max;
9832
9833 /* Assumes two's complement. */
9834 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9835 overflow = TRUE;
9836
9837 if ((lower_insn & 0x5000) == 0x4000)
9838 /* For a BLX instruction, make sure that the relocation is rounded up
9839 to a word boundary. This follows the semantics of the instruction
9840 which specifies that bit 1 of the target address will come from bit
9841 1 of the base address. */
9842 relocation = (relocation + 2) & ~ 3;
9843
9844 /* Put RELOCATION back into the insn. Assumes two's complement.
9845 We use the Thumb-2 encoding, which is safe even if dealing with
9846 a Thumb-1 instruction by virtue of our overflow check above. */
9847 reloc_sign = (signed_check < 0) ? 1 : 0;
9848 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9849 | ((relocation >> 12) & 0x3ff)
9850 | (reloc_sign << 10);
9851 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9852 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9853 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9854 | ((relocation >> 1) & 0x7ff);
9855
9856 /* Put the relocated value back in the object file: */
9857 bfd_put_16 (input_bfd, upper_insn, hit_data);
9858 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9859
9860 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9861 }
9862 break;
9863
9864 case R_ARM_THM_JUMP19:
9865 /* Thumb32 conditional branch instruction. */
9866 {
9867 bfd_vma relocation;
9868 bfd_boolean overflow = FALSE;
9869 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9870 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9871 bfd_signed_vma reloc_signed_max = 0xffffe;
9872 bfd_signed_vma reloc_signed_min = -0x100000;
9873 bfd_signed_vma signed_check;
9874 enum elf32_arm_stub_type stub_type = arm_stub_none;
9875 struct elf32_arm_stub_hash_entry *stub_entry;
9876 struct elf32_arm_link_hash_entry *hash;
9877
9878 /* Need to refetch the addend, reconstruct the top three bits,
9879 and squish the two 11 bit pieces together. */
9880 if (globals->use_rel)
9881 {
9882 bfd_vma S = (upper_insn & 0x0400) >> 10;
9883 bfd_vma upper = (upper_insn & 0x003f);
9884 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
9885 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
9886 bfd_vma lower = (lower_insn & 0x07ff);
9887
9888 upper |= J1 << 6;
9889 upper |= J2 << 7;
9890 upper |= (!S) << 8;
9891 upper -= 0x0100; /* Sign extend. */
9892
9893 addend = (upper << 12) | (lower << 1);
9894 signed_addend = addend;
9895 }
9896
9897 /* Handle calls via the PLT. */
9898 if (plt_offset != (bfd_vma) -1)
9899 {
9900 value = (splt->output_section->vma
9901 + splt->output_offset
9902 + plt_offset);
9903 /* Target the Thumb stub before the ARM PLT entry. */
9904 value -= PLT_THUMB_STUB_SIZE;
9905 *unresolved_reloc_p = FALSE;
9906 }
9907
9908 hash = (struct elf32_arm_link_hash_entry *)h;
9909
9910 stub_type = arm_type_of_stub (info, input_section, rel,
9911 st_type, &branch_type,
9912 hash, value, sym_sec,
9913 input_bfd, sym_name);
9914 if (stub_type != arm_stub_none)
9915 {
9916 stub_entry = elf32_arm_get_stub_entry (input_section,
9917 sym_sec, h,
9918 rel, globals,
9919 stub_type);
9920 if (stub_entry != NULL)
9921 {
9922 value = (stub_entry->stub_offset
9923 + stub_entry->stub_sec->output_offset
9924 + stub_entry->stub_sec->output_section->vma);
9925 }
9926 }
9927
9928 relocation = value + signed_addend;
9929 relocation -= (input_section->output_section->vma
9930 + input_section->output_offset
9931 + rel->r_offset);
9932 signed_check = (bfd_signed_vma) relocation;
9933
9934 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9935 overflow = TRUE;
9936
9937 /* Put RELOCATION back into the insn. */
9938 {
9939 bfd_vma S = (relocation & 0x00100000) >> 20;
9940 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9941 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9942 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9943 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9944
9945 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9946 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9947 }
9948
9949 /* Put the relocated value back in the object file: */
9950 bfd_put_16 (input_bfd, upper_insn, hit_data);
9951 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9952
9953 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9954 }
9955
9956 case R_ARM_THM_JUMP11:
9957 case R_ARM_THM_JUMP8:
9958 case R_ARM_THM_JUMP6:
9959 /* Thumb B (branch) instruction). */
9960 {
9961 bfd_signed_vma relocation;
9962 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9963 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9964 bfd_signed_vma signed_check;
9965
9966 /* CZB cannot jump backward. */
9967 if (r_type == R_ARM_THM_JUMP6)
9968 reloc_signed_min = 0;
9969
9970 if (globals->use_rel)
9971 {
9972 /* Need to refetch addend. */
9973 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9974 if (addend & ((howto->src_mask + 1) >> 1))
9975 {
9976 signed_addend = -1;
9977 signed_addend &= ~ howto->src_mask;
9978 signed_addend |= addend;
9979 }
9980 else
9981 signed_addend = addend;
9982 /* The value in the insn has been right shifted. We need to
9983 undo this, so that we can perform the address calculation
9984 in terms of bytes. */
9985 signed_addend <<= howto->rightshift;
9986 }
9987 relocation = value + signed_addend;
9988
9989 relocation -= (input_section->output_section->vma
9990 + input_section->output_offset
9991 + rel->r_offset);
9992
9993 relocation >>= howto->rightshift;
9994 signed_check = relocation;
9995
9996 if (r_type == R_ARM_THM_JUMP6)
9997 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9998 else
9999 relocation &= howto->dst_mask;
10000 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
10001
10002 bfd_put_16 (input_bfd, relocation, hit_data);
10003
10004 /* Assumes two's complement. */
10005 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10006 return bfd_reloc_overflow;
10007
10008 return bfd_reloc_ok;
10009 }
10010
10011 case R_ARM_ALU_PCREL7_0:
10012 case R_ARM_ALU_PCREL15_8:
10013 case R_ARM_ALU_PCREL23_15:
10014 {
10015 bfd_vma insn;
10016 bfd_vma relocation;
10017
10018 insn = bfd_get_32 (input_bfd, hit_data);
10019 if (globals->use_rel)
10020 {
10021 /* Extract the addend. */
10022 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10023 signed_addend = addend;
10024 }
10025 relocation = value + signed_addend;
10026
10027 relocation -= (input_section->output_section->vma
10028 + input_section->output_offset
10029 + rel->r_offset);
10030 insn = (insn & ~0xfff)
10031 | ((howto->bitpos << 7) & 0xf00)
10032 | ((relocation >> howto->bitpos) & 0xff);
10033 bfd_put_32 (input_bfd, value, hit_data);
10034 }
10035 return bfd_reloc_ok;
10036
10037 case R_ARM_GNU_VTINHERIT:
10038 case R_ARM_GNU_VTENTRY:
10039 return bfd_reloc_ok;
10040
10041 case R_ARM_GOTOFF32:
10042 /* Relocation is relative to the start of the
10043 global offset table. */
10044
10045 BFD_ASSERT (sgot != NULL);
10046 if (sgot == NULL)
10047 return bfd_reloc_notsupported;
10048
10049 /* If we are addressing a Thumb function, we need to adjust the
10050 address by one, so that attempts to call the function pointer will
10051 correctly interpret it as Thumb code. */
10052 if (branch_type == ST_BRANCH_TO_THUMB)
10053 value += 1;
10054
10055 /* Note that sgot->output_offset is not involved in this
10056 calculation. We always want the start of .got. If we
10057 define _GLOBAL_OFFSET_TABLE in a different way, as is
10058 permitted by the ABI, we might have to change this
10059 calculation. */
10060 value -= sgot->output_section->vma;
10061 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10062 contents, rel->r_offset, value,
10063 rel->r_addend);
10064
10065 case R_ARM_GOTPC:
10066 /* Use global offset table as symbol value. */
10067 BFD_ASSERT (sgot != NULL);
10068
10069 if (sgot == NULL)
10070 return bfd_reloc_notsupported;
10071
10072 *unresolved_reloc_p = FALSE;
10073 value = sgot->output_section->vma;
10074 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10075 contents, rel->r_offset, value,
10076 rel->r_addend);
10077
10078 case R_ARM_GOT32:
10079 case R_ARM_GOT_PREL:
10080 /* Relocation is to the entry for this symbol in the
10081 global offset table. */
10082 if (sgot == NULL)
10083 return bfd_reloc_notsupported;
10084
10085 if (dynreloc_st_type == STT_GNU_IFUNC
10086 && plt_offset != (bfd_vma) -1
10087 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
10088 {
10089 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10090 symbol, and the relocation resolves directly to the runtime
10091 target rather than to the .iplt entry. This means that any
10092 .got entry would be the same value as the .igot.plt entry,
10093 so there's no point creating both. */
10094 sgot = globals->root.igotplt;
10095 value = sgot->output_offset + gotplt_offset;
10096 }
10097 else if (h != NULL)
10098 {
10099 bfd_vma off;
10100
10101 off = h->got.offset;
10102 BFD_ASSERT (off != (bfd_vma) -1);
10103 if ((off & 1) != 0)
10104 {
10105 /* We have already processed one GOT relocation against
10106 this symbol. */
10107 off &= ~1;
10108 if (globals->root.dynamic_sections_created
10109 && !SYMBOL_REFERENCES_LOCAL (info, h))
10110 *unresolved_reloc_p = FALSE;
10111 }
10112 else
10113 {
10114 Elf_Internal_Rela outrel;
10115
10116 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
10117 {
10118 /* If the symbol doesn't resolve locally in a static
10119 object, we have an undefined reference. If the
10120 symbol doesn't resolve locally in a dynamic object,
10121 it should be resolved by the dynamic linker. */
10122 if (globals->root.dynamic_sections_created)
10123 {
10124 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10125 *unresolved_reloc_p = FALSE;
10126 }
10127 else
10128 outrel.r_info = 0;
10129 outrel.r_addend = 0;
10130 }
10131 else
10132 {
10133 if (dynreloc_st_type == STT_GNU_IFUNC)
10134 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10135 else if (bfd_link_pic (info) &&
10136 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10137 || h->root.type != bfd_link_hash_undefweak))
10138 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10139 else
10140 outrel.r_info = 0;
10141 outrel.r_addend = dynreloc_value;
10142 }
10143
10144 /* The GOT entry is initialized to zero by default.
10145 See if we should install a different value. */
10146 if (outrel.r_addend != 0
10147 && (outrel.r_info == 0 || globals->use_rel))
10148 {
10149 bfd_put_32 (output_bfd, outrel.r_addend,
10150 sgot->contents + off);
10151 outrel.r_addend = 0;
10152 }
10153
10154 if (outrel.r_info != 0)
10155 {
10156 outrel.r_offset = (sgot->output_section->vma
10157 + sgot->output_offset
10158 + off);
10159 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10160 }
10161 h->got.offset |= 1;
10162 }
10163 value = sgot->output_offset + off;
10164 }
10165 else
10166 {
10167 bfd_vma off;
10168
10169 BFD_ASSERT (local_got_offsets != NULL &&
10170 local_got_offsets[r_symndx] != (bfd_vma) -1);
10171
10172 off = local_got_offsets[r_symndx];
10173
10174 /* The offset must always be a multiple of 4. We use the
10175 least significant bit to record whether we have already
10176 generated the necessary reloc. */
10177 if ((off & 1) != 0)
10178 off &= ~1;
10179 else
10180 {
10181 if (globals->use_rel)
10182 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10183
10184 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10185 {
10186 Elf_Internal_Rela outrel;
10187
10188 outrel.r_addend = addend + dynreloc_value;
10189 outrel.r_offset = (sgot->output_section->vma
10190 + sgot->output_offset
10191 + off);
10192 if (dynreloc_st_type == STT_GNU_IFUNC)
10193 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10194 else
10195 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10196 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10197 }
10198
10199 local_got_offsets[r_symndx] |= 1;
10200 }
10201
10202 value = sgot->output_offset + off;
10203 }
10204 if (r_type != R_ARM_GOT32)
10205 value += sgot->output_section->vma;
10206
10207 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10208 contents, rel->r_offset, value,
10209 rel->r_addend);
10210
10211 case R_ARM_TLS_LDO32:
10212 value = value - dtpoff_base (info);
10213
10214 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10215 contents, rel->r_offset, value,
10216 rel->r_addend);
10217
10218 case R_ARM_TLS_LDM32:
10219 {
10220 bfd_vma off;
10221
10222 if (sgot == NULL)
10223 abort ();
10224
10225 off = globals->tls_ldm_got.offset;
10226
10227 if ((off & 1) != 0)
10228 off &= ~1;
10229 else
10230 {
10231 /* If we don't know the module number, create a relocation
10232 for it. */
10233 if (bfd_link_pic (info))
10234 {
10235 Elf_Internal_Rela outrel;
10236
10237 if (srelgot == NULL)
10238 abort ();
10239
10240 outrel.r_addend = 0;
10241 outrel.r_offset = (sgot->output_section->vma
10242 + sgot->output_offset + off);
10243 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10244
10245 if (globals->use_rel)
10246 bfd_put_32 (output_bfd, outrel.r_addend,
10247 sgot->contents + off);
10248
10249 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10250 }
10251 else
10252 bfd_put_32 (output_bfd, 1, sgot->contents + off);
10253
10254 globals->tls_ldm_got.offset |= 1;
10255 }
10256
10257 value = sgot->output_section->vma + sgot->output_offset + off
10258 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10259
10260 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10261 contents, rel->r_offset, value,
10262 rel->r_addend);
10263 }
10264
10265 case R_ARM_TLS_CALL:
10266 case R_ARM_THM_TLS_CALL:
10267 case R_ARM_TLS_GD32:
10268 case R_ARM_TLS_IE32:
10269 case R_ARM_TLS_GOTDESC:
10270 case R_ARM_TLS_DESCSEQ:
10271 case R_ARM_THM_TLS_DESCSEQ:
10272 {
10273 bfd_vma off, offplt;
10274 int indx = 0;
10275 char tls_type;
10276
10277 BFD_ASSERT (sgot != NULL);
10278
10279 if (h != NULL)
10280 {
10281 bfd_boolean dyn;
10282 dyn = globals->root.dynamic_sections_created;
10283 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10284 bfd_link_pic (info),
10285 h)
10286 && (!bfd_link_pic (info)
10287 || !SYMBOL_REFERENCES_LOCAL (info, h)))
10288 {
10289 *unresolved_reloc_p = FALSE;
10290 indx = h->dynindx;
10291 }
10292 off = h->got.offset;
10293 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10294 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10295 }
10296 else
10297 {
10298 BFD_ASSERT (local_got_offsets != NULL);
10299 off = local_got_offsets[r_symndx];
10300 offplt = local_tlsdesc_gotents[r_symndx];
10301 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10302 }
10303
10304 /* Linker relaxations happen from one of the
10305 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10306 if (ELF32_R_TYPE(rel->r_info) != r_type)
10307 tls_type = GOT_TLS_IE;
10308
10309 BFD_ASSERT (tls_type != GOT_UNKNOWN);
10310
10311 if ((off & 1) != 0)
10312 off &= ~1;
10313 else
10314 {
10315 bfd_boolean need_relocs = FALSE;
10316 Elf_Internal_Rela outrel;
10317 int cur_off = off;
10318
10319 /* The GOT entries have not been initialized yet. Do it
10320 now, and emit any relocations. If both an IE GOT and a
10321 GD GOT are necessary, we emit the GD first. */
10322
10323 if ((bfd_link_pic (info) || indx != 0)
10324 && (h == NULL
10325 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10326 || h->root.type != bfd_link_hash_undefweak))
10327 {
10328 need_relocs = TRUE;
10329 BFD_ASSERT (srelgot != NULL);
10330 }
10331
10332 if (tls_type & GOT_TLS_GDESC)
10333 {
10334 bfd_byte *loc;
10335
10336 /* We should have relaxed, unless this is an undefined
10337 weak symbol. */
10338 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10339 || bfd_link_pic (info));
10340 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10341 <= globals->root.sgotplt->size);
10342
10343 outrel.r_addend = 0;
10344 outrel.r_offset = (globals->root.sgotplt->output_section->vma
10345 + globals->root.sgotplt->output_offset
10346 + offplt
10347 + globals->sgotplt_jump_table_size);
10348
10349 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10350 sreloc = globals->root.srelplt;
10351 loc = sreloc->contents;
10352 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10353 BFD_ASSERT (loc + RELOC_SIZE (globals)
10354 <= sreloc->contents + sreloc->size);
10355
10356 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10357
10358 /* For globals, the first word in the relocation gets
10359 the relocation index and the top bit set, or zero,
10360 if we're binding now. For locals, it gets the
10361 symbol's offset in the tls section. */
10362 bfd_put_32 (output_bfd,
10363 !h ? value - elf_hash_table (info)->tls_sec->vma
10364 : info->flags & DF_BIND_NOW ? 0
10365 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10366 globals->root.sgotplt->contents + offplt
10367 + globals->sgotplt_jump_table_size);
10368
10369 /* Second word in the relocation is always zero. */
10370 bfd_put_32 (output_bfd, 0,
10371 globals->root.sgotplt->contents + offplt
10372 + globals->sgotplt_jump_table_size + 4);
10373 }
10374 if (tls_type & GOT_TLS_GD)
10375 {
10376 if (need_relocs)
10377 {
10378 outrel.r_addend = 0;
10379 outrel.r_offset = (sgot->output_section->vma
10380 + sgot->output_offset
10381 + cur_off);
10382 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10383
10384 if (globals->use_rel)
10385 bfd_put_32 (output_bfd, outrel.r_addend,
10386 sgot->contents + cur_off);
10387
10388 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10389
10390 if (indx == 0)
10391 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10392 sgot->contents + cur_off + 4);
10393 else
10394 {
10395 outrel.r_addend = 0;
10396 outrel.r_info = ELF32_R_INFO (indx,
10397 R_ARM_TLS_DTPOFF32);
10398 outrel.r_offset += 4;
10399
10400 if (globals->use_rel)
10401 bfd_put_32 (output_bfd, outrel.r_addend,
10402 sgot->contents + cur_off + 4);
10403
10404 elf32_arm_add_dynreloc (output_bfd, info,
10405 srelgot, &outrel);
10406 }
10407 }
10408 else
10409 {
10410 /* If we are not emitting relocations for a
10411 general dynamic reference, then we must be in a
10412 static link or an executable link with the
10413 symbol binding locally. Mark it as belonging
10414 to module 1, the executable. */
10415 bfd_put_32 (output_bfd, 1,
10416 sgot->contents + cur_off);
10417 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10418 sgot->contents + cur_off + 4);
10419 }
10420
10421 cur_off += 8;
10422 }
10423
10424 if (tls_type & GOT_TLS_IE)
10425 {
10426 if (need_relocs)
10427 {
10428 if (indx == 0)
10429 outrel.r_addend = value - dtpoff_base (info);
10430 else
10431 outrel.r_addend = 0;
10432 outrel.r_offset = (sgot->output_section->vma
10433 + sgot->output_offset
10434 + cur_off);
10435 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10436
10437 if (globals->use_rel)
10438 bfd_put_32 (output_bfd, outrel.r_addend,
10439 sgot->contents + cur_off);
10440
10441 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10442 }
10443 else
10444 bfd_put_32 (output_bfd, tpoff (info, value),
10445 sgot->contents + cur_off);
10446 cur_off += 4;
10447 }
10448
10449 if (h != NULL)
10450 h->got.offset |= 1;
10451 else
10452 local_got_offsets[r_symndx] |= 1;
10453 }
10454
10455 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10456 off += 8;
10457 else if (tls_type & GOT_TLS_GDESC)
10458 off = offplt;
10459
10460 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10461 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10462 {
10463 bfd_signed_vma offset;
10464 /* TLS stubs are arm mode. The original symbol is a
10465 data object, so branch_type is bogus. */
10466 branch_type = ST_BRANCH_TO_ARM;
10467 enum elf32_arm_stub_type stub_type
10468 = arm_type_of_stub (info, input_section, rel,
10469 st_type, &branch_type,
10470 (struct elf32_arm_link_hash_entry *)h,
10471 globals->tls_trampoline, globals->root.splt,
10472 input_bfd, sym_name);
10473
10474 if (stub_type != arm_stub_none)
10475 {
10476 struct elf32_arm_stub_hash_entry *stub_entry
10477 = elf32_arm_get_stub_entry
10478 (input_section, globals->root.splt, 0, rel,
10479 globals, stub_type);
10480 offset = (stub_entry->stub_offset
10481 + stub_entry->stub_sec->output_offset
10482 + stub_entry->stub_sec->output_section->vma);
10483 }
10484 else
10485 offset = (globals->root.splt->output_section->vma
10486 + globals->root.splt->output_offset
10487 + globals->tls_trampoline);
10488
10489 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10490 {
10491 unsigned long inst;
10492
10493 offset -= (input_section->output_section->vma
10494 + input_section->output_offset
10495 + rel->r_offset + 8);
10496
10497 inst = offset >> 2;
10498 inst &= 0x00ffffff;
10499 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10500 }
10501 else
10502 {
10503 /* Thumb blx encodes the offset in a complicated
10504 fashion. */
10505 unsigned upper_insn, lower_insn;
10506 unsigned neg;
10507
10508 offset -= (input_section->output_section->vma
10509 + input_section->output_offset
10510 + rel->r_offset + 4);
10511
10512 if (stub_type != arm_stub_none
10513 && arm_stub_is_thumb (stub_type))
10514 {
10515 lower_insn = 0xd000;
10516 }
10517 else
10518 {
10519 lower_insn = 0xc000;
10520 /* Round up the offset to a word boundary. */
10521 offset = (offset + 2) & ~2;
10522 }
10523
10524 neg = offset < 0;
10525 upper_insn = (0xf000
10526 | ((offset >> 12) & 0x3ff)
10527 | (neg << 10));
10528 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10529 | (((!((offset >> 22) & 1)) ^ neg) << 11)
10530 | ((offset >> 1) & 0x7ff);
10531 bfd_put_16 (input_bfd, upper_insn, hit_data);
10532 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10533 return bfd_reloc_ok;
10534 }
10535 }
10536 /* These relocations need special care, as besides the fact
10537 that they point somewhere in .gotplt, the addend must be
10538 adjusted accordingly depending on the type of instruction
10539 we refer to. */
10540 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10541 {
10542 unsigned long data, insn;
10543 unsigned thumb;
10544
10545 data = bfd_get_32 (input_bfd, hit_data);
10546 thumb = data & 1;
10547 data &= ~1u;
10548
10549 if (thumb)
10550 {
10551 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10552 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10553 insn = (insn << 16)
10554 | bfd_get_16 (input_bfd,
10555 contents + rel->r_offset - data + 2);
10556 if ((insn & 0xf800c000) == 0xf000c000)
10557 /* bl/blx */
10558 value = -6;
10559 else if ((insn & 0xffffff00) == 0x4400)
10560 /* add */
10561 value = -5;
10562 else
10563 {
10564 (*_bfd_error_handler)
10565 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10566 input_bfd, input_section,
10567 (unsigned long)rel->r_offset, insn);
10568 return bfd_reloc_notsupported;
10569 }
10570 }
10571 else
10572 {
10573 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10574
10575 switch (insn >> 24)
10576 {
10577 case 0xeb: /* bl */
10578 case 0xfa: /* blx */
10579 value = -4;
10580 break;
10581
10582 case 0xe0: /* add */
10583 value = -8;
10584 break;
10585
10586 default:
10587 (*_bfd_error_handler)
10588 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10589 input_bfd, input_section,
10590 (unsigned long)rel->r_offset, insn);
10591 return bfd_reloc_notsupported;
10592 }
10593 }
10594
10595 value += ((globals->root.sgotplt->output_section->vma
10596 + globals->root.sgotplt->output_offset + off)
10597 - (input_section->output_section->vma
10598 + input_section->output_offset
10599 + rel->r_offset)
10600 + globals->sgotplt_jump_table_size);
10601 }
10602 else
10603 value = ((globals->root.sgot->output_section->vma
10604 + globals->root.sgot->output_offset + off)
10605 - (input_section->output_section->vma
10606 + input_section->output_offset + rel->r_offset));
10607
10608 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10609 contents, rel->r_offset, value,
10610 rel->r_addend);
10611 }
10612
10613 case R_ARM_TLS_LE32:
10614 if (bfd_link_dll (info))
10615 {
10616 (*_bfd_error_handler)
10617 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10618 input_bfd, input_section,
10619 (long) rel->r_offset, howto->name);
10620 return bfd_reloc_notsupported;
10621 }
10622 else
10623 value = tpoff (info, value);
10624
10625 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10626 contents, rel->r_offset, value,
10627 rel->r_addend);
10628
10629 case R_ARM_V4BX:
10630 if (globals->fix_v4bx)
10631 {
10632 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10633
10634 /* Ensure that we have a BX instruction. */
10635 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10636
10637 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10638 {
10639 /* Branch to veneer. */
10640 bfd_vma glue_addr;
10641 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10642 glue_addr -= input_section->output_section->vma
10643 + input_section->output_offset
10644 + rel->r_offset + 8;
10645 insn = (insn & 0xf0000000) | 0x0a000000
10646 | ((glue_addr >> 2) & 0x00ffffff);
10647 }
10648 else
10649 {
10650 /* Preserve Rm (lowest four bits) and the condition code
10651 (highest four bits). Other bits encode MOV PC,Rm. */
10652 insn = (insn & 0xf000000f) | 0x01a0f000;
10653 }
10654
10655 bfd_put_32 (input_bfd, insn, hit_data);
10656 }
10657 return bfd_reloc_ok;
10658
10659 case R_ARM_MOVW_ABS_NC:
10660 case R_ARM_MOVT_ABS:
10661 case R_ARM_MOVW_PREL_NC:
10662 case R_ARM_MOVT_PREL:
10663 /* Until we properly support segment-base-relative addressing then
10664 we assume the segment base to be zero, as for the group relocations.
10665 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10666 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10667 case R_ARM_MOVW_BREL_NC:
10668 case R_ARM_MOVW_BREL:
10669 case R_ARM_MOVT_BREL:
10670 {
10671 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10672
10673 if (globals->use_rel)
10674 {
10675 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10676 signed_addend = (addend ^ 0x8000) - 0x8000;
10677 }
10678
10679 value += signed_addend;
10680
10681 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10682 value -= (input_section->output_section->vma
10683 + input_section->output_offset + rel->r_offset);
10684
10685 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10686 return bfd_reloc_overflow;
10687
10688 if (branch_type == ST_BRANCH_TO_THUMB)
10689 value |= 1;
10690
10691 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10692 || r_type == R_ARM_MOVT_BREL)
10693 value >>= 16;
10694
10695 insn &= 0xfff0f000;
10696 insn |= value & 0xfff;
10697 insn |= (value & 0xf000) << 4;
10698 bfd_put_32 (input_bfd, insn, hit_data);
10699 }
10700 return bfd_reloc_ok;
10701
10702 case R_ARM_THM_MOVW_ABS_NC:
10703 case R_ARM_THM_MOVT_ABS:
10704 case R_ARM_THM_MOVW_PREL_NC:
10705 case R_ARM_THM_MOVT_PREL:
10706 /* Until we properly support segment-base-relative addressing then
10707 we assume the segment base to be zero, as for the above relocations.
10708 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10709 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10710 as R_ARM_THM_MOVT_ABS. */
10711 case R_ARM_THM_MOVW_BREL_NC:
10712 case R_ARM_THM_MOVW_BREL:
10713 case R_ARM_THM_MOVT_BREL:
10714 {
10715 bfd_vma insn;
10716
10717 insn = bfd_get_16 (input_bfd, hit_data) << 16;
10718 insn |= bfd_get_16 (input_bfd, hit_data + 2);
10719
10720 if (globals->use_rel)
10721 {
10722 addend = ((insn >> 4) & 0xf000)
10723 | ((insn >> 15) & 0x0800)
10724 | ((insn >> 4) & 0x0700)
10725 | (insn & 0x00ff);
10726 signed_addend = (addend ^ 0x8000) - 0x8000;
10727 }
10728
10729 value += signed_addend;
10730
10731 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
10732 value -= (input_section->output_section->vma
10733 + input_section->output_offset + rel->r_offset);
10734
10735 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10736 return bfd_reloc_overflow;
10737
10738 if (branch_type == ST_BRANCH_TO_THUMB)
10739 value |= 1;
10740
10741 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10742 || r_type == R_ARM_THM_MOVT_BREL)
10743 value >>= 16;
10744
10745 insn &= 0xfbf08f00;
10746 insn |= (value & 0xf000) << 4;
10747 insn |= (value & 0x0800) << 15;
10748 insn |= (value & 0x0700) << 4;
10749 insn |= (value & 0x00ff);
10750
10751 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10752 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10753 }
10754 return bfd_reloc_ok;
10755
10756 case R_ARM_ALU_PC_G0_NC:
10757 case R_ARM_ALU_PC_G1_NC:
10758 case R_ARM_ALU_PC_G0:
10759 case R_ARM_ALU_PC_G1:
10760 case R_ARM_ALU_PC_G2:
10761 case R_ARM_ALU_SB_G0_NC:
10762 case R_ARM_ALU_SB_G1_NC:
10763 case R_ARM_ALU_SB_G0:
10764 case R_ARM_ALU_SB_G1:
10765 case R_ARM_ALU_SB_G2:
10766 {
10767 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10768 bfd_vma pc = input_section->output_section->vma
10769 + input_section->output_offset + rel->r_offset;
10770 /* sb is the origin of the *segment* containing the symbol. */
10771 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10772 bfd_vma residual;
10773 bfd_vma g_n;
10774 bfd_signed_vma signed_value;
10775 int group = 0;
10776
10777 /* Determine which group of bits to select. */
10778 switch (r_type)
10779 {
10780 case R_ARM_ALU_PC_G0_NC:
10781 case R_ARM_ALU_PC_G0:
10782 case R_ARM_ALU_SB_G0_NC:
10783 case R_ARM_ALU_SB_G0:
10784 group = 0;
10785 break;
10786
10787 case R_ARM_ALU_PC_G1_NC:
10788 case R_ARM_ALU_PC_G1:
10789 case R_ARM_ALU_SB_G1_NC:
10790 case R_ARM_ALU_SB_G1:
10791 group = 1;
10792 break;
10793
10794 case R_ARM_ALU_PC_G2:
10795 case R_ARM_ALU_SB_G2:
10796 group = 2;
10797 break;
10798
10799 default:
10800 abort ();
10801 }
10802
10803 /* If REL, extract the addend from the insn. If RELA, it will
10804 have already been fetched for us. */
10805 if (globals->use_rel)
10806 {
10807 int negative;
10808 bfd_vma constant = insn & 0xff;
10809 bfd_vma rotation = (insn & 0xf00) >> 8;
10810
10811 if (rotation == 0)
10812 signed_addend = constant;
10813 else
10814 {
10815 /* Compensate for the fact that in the instruction, the
10816 rotation is stored in multiples of 2 bits. */
10817 rotation *= 2;
10818
10819 /* Rotate "constant" right by "rotation" bits. */
10820 signed_addend = (constant >> rotation) |
10821 (constant << (8 * sizeof (bfd_vma) - rotation));
10822 }
10823
10824 /* Determine if the instruction is an ADD or a SUB.
10825 (For REL, this determines the sign of the addend.) */
10826 negative = identify_add_or_sub (insn);
10827 if (negative == 0)
10828 {
10829 (*_bfd_error_handler)
10830 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10831 input_bfd, input_section,
10832 (long) rel->r_offset, howto->name);
10833 return bfd_reloc_overflow;
10834 }
10835
10836 signed_addend *= negative;
10837 }
10838
10839 /* Compute the value (X) to go in the place. */
10840 if (r_type == R_ARM_ALU_PC_G0_NC
10841 || r_type == R_ARM_ALU_PC_G1_NC
10842 || r_type == R_ARM_ALU_PC_G0
10843 || r_type == R_ARM_ALU_PC_G1
10844 || r_type == R_ARM_ALU_PC_G2)
10845 /* PC relative. */
10846 signed_value = value - pc + signed_addend;
10847 else
10848 /* Section base relative. */
10849 signed_value = value - sb + signed_addend;
10850
10851 /* If the target symbol is a Thumb function, then set the
10852 Thumb bit in the address. */
10853 if (branch_type == ST_BRANCH_TO_THUMB)
10854 signed_value |= 1;
10855
10856 /* Calculate the value of the relevant G_n, in encoded
10857 constant-with-rotation format. */
10858 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10859 group, &residual);
10860
10861 /* Check for overflow if required. */
10862 if ((r_type == R_ARM_ALU_PC_G0
10863 || r_type == R_ARM_ALU_PC_G1
10864 || r_type == R_ARM_ALU_PC_G2
10865 || r_type == R_ARM_ALU_SB_G0
10866 || r_type == R_ARM_ALU_SB_G1
10867 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
10868 {
10869 (*_bfd_error_handler)
10870 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10871 input_bfd, input_section,
10872 (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
10873 howto->name);
10874 return bfd_reloc_overflow;
10875 }
10876
10877 /* Mask out the value and the ADD/SUB part of the opcode; take care
10878 not to destroy the S bit. */
10879 insn &= 0xff1ff000;
10880
10881 /* Set the opcode according to whether the value to go in the
10882 place is negative. */
10883 if (signed_value < 0)
10884 insn |= 1 << 22;
10885 else
10886 insn |= 1 << 23;
10887
10888 /* Encode the offset. */
10889 insn |= g_n;
10890
10891 bfd_put_32 (input_bfd, insn, hit_data);
10892 }
10893 return bfd_reloc_ok;
10894
10895 case R_ARM_LDR_PC_G0:
10896 case R_ARM_LDR_PC_G1:
10897 case R_ARM_LDR_PC_G2:
10898 case R_ARM_LDR_SB_G0:
10899 case R_ARM_LDR_SB_G1:
10900 case R_ARM_LDR_SB_G2:
10901 {
10902 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10903 bfd_vma pc = input_section->output_section->vma
10904 + input_section->output_offset + rel->r_offset;
10905 /* sb is the origin of the *segment* containing the symbol. */
10906 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10907 bfd_vma residual;
10908 bfd_signed_vma signed_value;
10909 int group = 0;
10910
10911 /* Determine which groups of bits to calculate. */
10912 switch (r_type)
10913 {
10914 case R_ARM_LDR_PC_G0:
10915 case R_ARM_LDR_SB_G0:
10916 group = 0;
10917 break;
10918
10919 case R_ARM_LDR_PC_G1:
10920 case R_ARM_LDR_SB_G1:
10921 group = 1;
10922 break;
10923
10924 case R_ARM_LDR_PC_G2:
10925 case R_ARM_LDR_SB_G2:
10926 group = 2;
10927 break;
10928
10929 default:
10930 abort ();
10931 }
10932
10933 /* If REL, extract the addend from the insn. If RELA, it will
10934 have already been fetched for us. */
10935 if (globals->use_rel)
10936 {
10937 int negative = (insn & (1 << 23)) ? 1 : -1;
10938 signed_addend = negative * (insn & 0xfff);
10939 }
10940
10941 /* Compute the value (X) to go in the place. */
10942 if (r_type == R_ARM_LDR_PC_G0
10943 || r_type == R_ARM_LDR_PC_G1
10944 || r_type == R_ARM_LDR_PC_G2)
10945 /* PC relative. */
10946 signed_value = value - pc + signed_addend;
10947 else
10948 /* Section base relative. */
10949 signed_value = value - sb + signed_addend;
10950
10951 /* Calculate the value of the relevant G_{n-1} to obtain
10952 the residual at that stage. */
10953 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
10954 group - 1, &residual);
10955
10956 /* Check for overflow. */
10957 if (residual >= 0x1000)
10958 {
10959 (*_bfd_error_handler)
10960 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10961 input_bfd, input_section,
10962 (long) rel->r_offset, labs (signed_value), howto->name);
10963 return bfd_reloc_overflow;
10964 }
10965
10966 /* Mask out the value and U bit. */
10967 insn &= 0xff7ff000;
10968
10969 /* Set the U bit if the value to go in the place is non-negative. */
10970 if (signed_value >= 0)
10971 insn |= 1 << 23;
10972
10973 /* Encode the offset. */
10974 insn |= residual;
10975
10976 bfd_put_32 (input_bfd, insn, hit_data);
10977 }
10978 return bfd_reloc_ok;
10979
10980 case R_ARM_LDRS_PC_G0:
10981 case R_ARM_LDRS_PC_G1:
10982 case R_ARM_LDRS_PC_G2:
10983 case R_ARM_LDRS_SB_G0:
10984 case R_ARM_LDRS_SB_G1:
10985 case R_ARM_LDRS_SB_G2:
10986 {
10987 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10988 bfd_vma pc = input_section->output_section->vma
10989 + input_section->output_offset + rel->r_offset;
10990 /* sb is the origin of the *segment* containing the symbol. */
10991 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10992 bfd_vma residual;
10993 bfd_signed_vma signed_value;
10994 int group = 0;
10995
10996 /* Determine which groups of bits to calculate. */
10997 switch (r_type)
10998 {
10999 case R_ARM_LDRS_PC_G0:
11000 case R_ARM_LDRS_SB_G0:
11001 group = 0;
11002 break;
11003
11004 case R_ARM_LDRS_PC_G1:
11005 case R_ARM_LDRS_SB_G1:
11006 group = 1;
11007 break;
11008
11009 case R_ARM_LDRS_PC_G2:
11010 case R_ARM_LDRS_SB_G2:
11011 group = 2;
11012 break;
11013
11014 default:
11015 abort ();
11016 }
11017
11018 /* If REL, extract the addend from the insn. If RELA, it will
11019 have already been fetched for us. */
11020 if (globals->use_rel)
11021 {
11022 int negative = (insn & (1 << 23)) ? 1 : -1;
11023 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11024 }
11025
11026 /* Compute the value (X) to go in the place. */
11027 if (r_type == R_ARM_LDRS_PC_G0
11028 || r_type == R_ARM_LDRS_PC_G1
11029 || r_type == R_ARM_LDRS_PC_G2)
11030 /* PC relative. */
11031 signed_value = value - pc + signed_addend;
11032 else
11033 /* Section base relative. */
11034 signed_value = value - sb + signed_addend;
11035
11036 /* Calculate the value of the relevant G_{n-1} to obtain
11037 the residual at that stage. */
11038 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11039 group - 1, &residual);
11040
11041 /* Check for overflow. */
11042 if (residual >= 0x100)
11043 {
11044 (*_bfd_error_handler)
11045 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11046 input_bfd, input_section,
11047 (long) rel->r_offset, labs (signed_value), howto->name);
11048 return bfd_reloc_overflow;
11049 }
11050
11051 /* Mask out the value and U bit. */
11052 insn &= 0xff7ff0f0;
11053
11054 /* Set the U bit if the value to go in the place is non-negative. */
11055 if (signed_value >= 0)
11056 insn |= 1 << 23;
11057
11058 /* Encode the offset. */
11059 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11060
11061 bfd_put_32 (input_bfd, insn, hit_data);
11062 }
11063 return bfd_reloc_ok;
11064
11065 case R_ARM_LDC_PC_G0:
11066 case R_ARM_LDC_PC_G1:
11067 case R_ARM_LDC_PC_G2:
11068 case R_ARM_LDC_SB_G0:
11069 case R_ARM_LDC_SB_G1:
11070 case R_ARM_LDC_SB_G2:
11071 {
11072 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11073 bfd_vma pc = input_section->output_section->vma
11074 + input_section->output_offset + rel->r_offset;
11075 /* sb is the origin of the *segment* containing the symbol. */
11076 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11077 bfd_vma residual;
11078 bfd_signed_vma signed_value;
11079 int group = 0;
11080
11081 /* Determine which groups of bits to calculate. */
11082 switch (r_type)
11083 {
11084 case R_ARM_LDC_PC_G0:
11085 case R_ARM_LDC_SB_G0:
11086 group = 0;
11087 break;
11088
11089 case R_ARM_LDC_PC_G1:
11090 case R_ARM_LDC_SB_G1:
11091 group = 1;
11092 break;
11093
11094 case R_ARM_LDC_PC_G2:
11095 case R_ARM_LDC_SB_G2:
11096 group = 2;
11097 break;
11098
11099 default:
11100 abort ();
11101 }
11102
11103 /* If REL, extract the addend from the insn. If RELA, it will
11104 have already been fetched for us. */
11105 if (globals->use_rel)
11106 {
11107 int negative = (insn & (1 << 23)) ? 1 : -1;
11108 signed_addend = negative * ((insn & 0xff) << 2);
11109 }
11110
11111 /* Compute the value (X) to go in the place. */
11112 if (r_type == R_ARM_LDC_PC_G0
11113 || r_type == R_ARM_LDC_PC_G1
11114 || r_type == R_ARM_LDC_PC_G2)
11115 /* PC relative. */
11116 signed_value = value - pc + signed_addend;
11117 else
11118 /* Section base relative. */
11119 signed_value = value - sb + signed_addend;
11120
11121 /* Calculate the value of the relevant G_{n-1} to obtain
11122 the residual at that stage. */
11123 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11124 group - 1, &residual);
11125
11126 /* Check for overflow. (The absolute value to go in the place must be
11127 divisible by four and, after having been divided by four, must
11128 fit in eight bits.) */
11129 if ((residual & 0x3) != 0 || residual >= 0x400)
11130 {
11131 (*_bfd_error_handler)
11132 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11133 input_bfd, input_section,
11134 (long) rel->r_offset, labs (signed_value), howto->name);
11135 return bfd_reloc_overflow;
11136 }
11137
11138 /* Mask out the value and U bit. */
11139 insn &= 0xff7fff00;
11140
11141 /* Set the U bit if the value to go in the place is non-negative. */
11142 if (signed_value >= 0)
11143 insn |= 1 << 23;
11144
11145 /* Encode the offset. */
11146 insn |= residual >> 2;
11147
11148 bfd_put_32 (input_bfd, insn, hit_data);
11149 }
11150 return bfd_reloc_ok;
11151
11152 case R_ARM_THM_ALU_ABS_G0_NC:
11153 case R_ARM_THM_ALU_ABS_G1_NC:
11154 case R_ARM_THM_ALU_ABS_G2_NC:
11155 case R_ARM_THM_ALU_ABS_G3_NC:
11156 {
11157 const int shift_array[4] = {0, 8, 16, 24};
11158 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11159 bfd_vma addr = value;
11160 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11161
11162 /* Compute address. */
11163 if (globals->use_rel)
11164 signed_addend = insn & 0xff;
11165 addr += signed_addend;
11166 if (branch_type == ST_BRANCH_TO_THUMB)
11167 addr |= 1;
11168 /* Clean imm8 insn. */
11169 insn &= 0xff00;
11170 /* And update with correct part of address. */
11171 insn |= (addr >> shift) & 0xff;
11172 /* Update insn. */
11173 bfd_put_16 (input_bfd, insn, hit_data);
11174 }
11175
11176 *unresolved_reloc_p = FALSE;
11177 return bfd_reloc_ok;
11178
11179 default:
11180 return bfd_reloc_notsupported;
11181 }
11182 }
11183
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used during
   relocatable (ld -r) links on REL targets, where the addend lives in
   the instruction itself and must be adjusted in place.  */
static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* These Thumb branch relocations keep their addend split across
	 two consecutive 16-bit halfwords: the low 11 bits of each
	 halfword hold, respectively, bits 22:12 and bits 11:1 of the
	 (halfword-aligned) offset.  Reassemble, adjust, re-split.  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* NOTE(review): the reassembled 23-bit addend is not sign-extended
	 before INCREMENT is applied; presumably safe because this path
	 only adds non-negative section output offsets — confirm.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      /* Write the adjusted offset back into the two halfwords,
	 preserving the opcode bits (top 5 bits of each).  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma        contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  /* Top bit of the field is set: sign-extend by ORing in all
	     the bits above the source mask.  */
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* These store a word offset; scale it up to bytes before
	     applying the byte INCREMENT.  NOTE(review): howto->size is
	     used as the left-shift count here, matching the reloc's
	     rightshift of 2 for these 32-bit branch relocs — confirm
	     they stay in sync if howtos change.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      /* Splice the adjusted addend back into the instruction.  */
      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
11257
/* Nonzero if R_TYPE is any ARM TLS relocation, including the GNU
   descriptor-based dialect matched by IS_ARM_TLS_GNU_RELOC (defined
   just below; that is fine for macros since expansion happens at the
   point of use).  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11276
/* Relocate an ARM ELF section.

   Apply the relocations in RELOCS to the CONTENTS of INPUT_SECTION
   (read from INPUT_BFD).  Local symbols are resolved through
   LOCAL_SYMS/LOCAL_SECTIONS; globals through the linker hash table.
   Returns FALSE on a hard error after reporting it via the info
   callbacks / _bfd_error_handler.  */

static bfd_boolean
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int                          r_type;
      reloc_howto_type *           howto;
      unsigned long                r_symndx;
      Elf_Internal_Sym *           sym;
      asection *                   sec;
      struct elf_link_hash_entry * h;
      bfd_vma                      relocation;
      bfd_reloc_status_type        r;
      arelent                      bfd_reloc;
      char                         sym_type;
      bfd_boolean                  unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type   = ELF32_R_TYPE (rel->r_info);
      /* Normalise the reloc number — presumably maps target-variant
	 numberings onto the ones handled here (see arm_real_reloc_type).  */
      r_type   = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; nothing
	 to patch in the section contents.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    {
	      if (!info->callbacks->undefined_symbol
		  (info, bfd_elf_string_from_elf_section
		   (input_bfd, symtab_hdr->sh_link, sym->st_name),
		   input_bfd, input_section,
		   rel->r_offset, TRUE))
		return FALSE;
	    }

	  if (globals->use_rel)
	    {
	      /* REL target: the addend is stored in the section
		 contents, so start from the symbol's final address.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* Section-relative reference into a SEC_MERGE
		     section: the in-place addend addresses data that
		     string/constant merging may have moved.  Extract
		     the addend from the instruction encoding, remap it
		     through the merge machinery, and write it back.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* ARM MOVW/MOVT: imm16 split as imm4:imm12.
			 The ^/- pair sign-extends the 16-bit value.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb-2 MOVW/MOVT: imm16 scattered as
			 imm4:i:imm3:imm8 over two halfwords.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Generic extraction only works when the addend
			 occupies a contiguous, unshifted field.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  /* Sign-extend from the top bit of src_mask.  */
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  /* Remap the addend through the merged section and
		     convert it back to an in-place addend relative to
		     the (unmerged) symbol value computed above.  */
		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    /* RELA target: the generic helper folds in rel->r_addend.  */
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol: resolve through the
	     hash table (handles versioning, dynamic symbols, etc.).  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics (falling back to the
	 section name for unnamed local/section symbols).  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Diagnose a mismatch between TLS-ness of the reloc and of the
	 symbol it targets (warning only; relocation still proceeds).  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  /* Report the failure through the appropriate linker
	     callback; only dangerous/out-of-range/unsupported errors
	     share the common path.  */
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		  return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
11624
11625 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11626 adds the edit to the start of the list. (The list must be built in order of
11627 ascending TINDEX: the function's callers are primarily responsible for
11628 maintaining that condition). */
11629
11630 static void
11631 add_unwind_table_edit (arm_unwind_table_edit **head,
11632 arm_unwind_table_edit **tail,
11633 arm_unwind_edit_type type,
11634 asection *linked_section,
11635 unsigned int tindex)
11636 {
11637 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11638 xmalloc (sizeof (arm_unwind_table_edit));
11639
11640 new_edit->type = type;
11641 new_edit->linked_section = linked_section;
11642 new_edit->index = tindex;
11643
11644 if (tindex > 0)
11645 {
11646 new_edit->next = NULL;
11647
11648 if (*tail)
11649 (*tail)->next = new_edit;
11650
11651 (*tail) = new_edit;
11652
11653 if (!*head)
11654 (*head) = new_edit;
11655 }
11656 else
11657 {
11658 new_edit->next = *head;
11659
11660 if (!*tail)
11661 *tail = new_edit;
11662
11663 *head = new_edit;
11664 }
11665 }
11666
11667 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11668
11669 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11670 static void
11671 adjust_exidx_size(asection *exidx_sec, int adjust)
11672 {
11673 asection *out_sec;
11674
11675 if (!exidx_sec->rawsize)
11676 exidx_sec->rawsize = exidx_sec->size;
11677
11678 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11679 out_sec = exidx_sec->output_section;
11680 /* Adjust size of output section. */
11681 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11682 }
11683
11684 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11685 static void
11686 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11687 {
11688 struct _arm_elf_section_data *exidx_arm_data;
11689
11690 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11691 add_unwind_table_edit (
11692 &exidx_arm_data->u.exidx.unwind_edit_list,
11693 &exidx_arm_data->u.exidx.unwind_edit_tail,
11694 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11695
11696 exidx_arm_data->additional_reloc_count++;
11697
11698 adjust_exidx_size(exidx_sec, 8);
11699 }
11700
11701 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11702 made to those tables, such that:
11703
11704 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11705 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11706 codes which have been inlined into the index).
11707
11708 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11709
11710 The edits are applied when the tables are written
11711 (in elf32_arm_write_section). */
11712
11713 bfd_boolean
11714 elf32_arm_fix_exidx_coverage (asection **text_section_order,
11715 unsigned int num_text_sections,
11716 struct bfd_link_info *info,
11717 bfd_boolean merge_exidx_entries)
11718 {
11719 bfd *inp;
11720 unsigned int last_second_word = 0, i;
11721 asection *last_exidx_sec = NULL;
11722 asection *last_text_sec = NULL;
11723 int last_unwind_type = -1;
11724
11725 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11726 text sections. */
11727 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
11728 {
11729 asection *sec;
11730
11731 for (sec = inp->sections; sec != NULL; sec = sec->next)
11732 {
11733 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
11734 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
11735
11736 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
11737 continue;
11738
11739 if (elf_sec->linked_to)
11740 {
11741 Elf_Internal_Shdr *linked_hdr
11742 = &elf_section_data (elf_sec->linked_to)->this_hdr;
11743 struct _arm_elf_section_data *linked_sec_arm_data
11744 = get_arm_elf_section_data (linked_hdr->bfd_section);
11745
11746 if (linked_sec_arm_data == NULL)
11747 continue;
11748
11749 /* Link this .ARM.exidx section back from the text section it
11750 describes. */
11751 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
11752 }
11753 }
11754 }
11755
11756 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
11757 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
11758 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
11759
11760 for (i = 0; i < num_text_sections; i++)
11761 {
11762 asection *sec = text_section_order[i];
11763 asection *exidx_sec;
11764 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
11765 struct _arm_elf_section_data *exidx_arm_data;
11766 bfd_byte *contents = NULL;
11767 int deleted_exidx_bytes = 0;
11768 bfd_vma j;
11769 arm_unwind_table_edit *unwind_edit_head = NULL;
11770 arm_unwind_table_edit *unwind_edit_tail = NULL;
11771 Elf_Internal_Shdr *hdr;
11772 bfd *ibfd;
11773
11774 if (arm_data == NULL)
11775 continue;
11776
11777 exidx_sec = arm_data->u.text.arm_exidx_sec;
11778 if (exidx_sec == NULL)
11779 {
11780 /* Section has no unwind data. */
11781 if (last_unwind_type == 0 || !last_exidx_sec)
11782 continue;
11783
11784 /* Ignore zero sized sections. */
11785 if (sec->size == 0)
11786 continue;
11787
11788 insert_cantunwind_after(last_text_sec, last_exidx_sec);
11789 last_unwind_type = 0;
11790 continue;
11791 }
11792
11793 /* Skip /DISCARD/ sections. */
11794 if (bfd_is_abs_section (exidx_sec->output_section))
11795 continue;
11796
11797 hdr = &elf_section_data (exidx_sec)->this_hdr;
11798 if (hdr->sh_type != SHT_ARM_EXIDX)
11799 continue;
11800
11801 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11802 if (exidx_arm_data == NULL)
11803 continue;
11804
11805 ibfd = exidx_sec->owner;
11806
11807 if (hdr->contents != NULL)
11808 contents = hdr->contents;
11809 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
11810 /* An error? */
11811 continue;
11812
11813 if (last_unwind_type > 0)
11814 {
11815 unsigned int first_word = bfd_get_32 (ibfd, contents);
11816 /* Add cantunwind if first unwind item does not match section
11817 start. */
11818 if (first_word != sec->vma)
11819 {
11820 insert_cantunwind_after (last_text_sec, last_exidx_sec);
11821 last_unwind_type = 0;
11822 }
11823 }
11824
11825 for (j = 0; j < hdr->sh_size; j += 8)
11826 {
11827 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
11828 int unwind_type;
11829 int elide = 0;
11830
11831 /* An EXIDX_CANTUNWIND entry. */
11832 if (second_word == 1)
11833 {
11834 if (last_unwind_type == 0)
11835 elide = 1;
11836 unwind_type = 0;
11837 }
11838 /* Inlined unwinding data. Merge if equal to previous. */
11839 else if ((second_word & 0x80000000) != 0)
11840 {
11841 if (merge_exidx_entries
11842 && last_second_word == second_word && last_unwind_type == 1)
11843 elide = 1;
11844 unwind_type = 1;
11845 last_second_word = second_word;
11846 }
11847 /* Normal table entry. In theory we could merge these too,
11848 but duplicate entries are likely to be much less common. */
11849 else
11850 unwind_type = 2;
11851
11852 if (elide && !bfd_link_relocatable (info))
11853 {
11854 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
11855 DELETE_EXIDX_ENTRY, NULL, j / 8);
11856
11857 deleted_exidx_bytes += 8;
11858 }
11859
11860 last_unwind_type = unwind_type;
11861 }
11862
11863 /* Free contents if we allocated it ourselves. */
11864 if (contents != hdr->contents)
11865 free (contents);
11866
11867 /* Record edits to be applied later (in elf32_arm_write_section). */
11868 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
11869 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
11870
11871 if (deleted_exidx_bytes > 0)
11872 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
11873
11874 last_exidx_sec = exidx_sec;
11875 last_text_sec = sec;
11876 }
11877
11878 /* Add terminating CANTUNWIND entry. */
11879 if (!bfd_link_relocatable (info) && last_exidx_sec
11880 && last_unwind_type != 0)
11881 insert_cantunwind_after(last_text_sec, last_exidx_sec);
11882
11883 return TRUE;
11884 }
11885
11886 static bfd_boolean
11887 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
11888 bfd *ibfd, const char *name)
11889 {
11890 asection *sec, *osec;
11891
11892 sec = bfd_get_linker_section (ibfd, name);
11893 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
11894 return TRUE;
11895
11896 osec = sec->output_section;
11897 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
11898 return TRUE;
11899
11900 if (! bfd_set_section_contents (obfd, osec, sec->contents,
11901 sec->output_offset, sec->size))
11902 return FALSE;
11903
11904 return TRUE;
11905 }
11906
11907 static bfd_boolean
11908 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
11909 {
11910 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
11911 asection *sec, *osec;
11912
11913 if (globals == NULL)
11914 return FALSE;
11915
11916 /* Invoke the regular ELF backend linker to do all the work. */
11917 if (!bfd_elf_final_link (abfd, info))
11918 return FALSE;
11919
11920 /* Process stub sections (eg BE8 encoding, ...). */
11921 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
11922 unsigned int i;
11923 for (i=0; i<htab->top_id; i++)
11924 {
11925 sec = htab->stub_group[i].stub_sec;
11926 /* Only process it once, in its link_sec slot. */
11927 if (sec && i == htab->stub_group[i].link_sec->id)
11928 {
11929 osec = sec->output_section;
11930 elf32_arm_write_section (abfd, info, sec, sec->contents);
11931 if (! bfd_set_section_contents (abfd, osec, sec->contents,
11932 sec->output_offset, sec->size))
11933 return FALSE;
11934 }
11935 }
11936
11937 /* Write out any glue sections now that we have created all the
11938 stubs. */
11939 if (globals->bfd_of_glue_owner != NULL)
11940 {
11941 if (! elf32_arm_output_glue_section (info, abfd,
11942 globals->bfd_of_glue_owner,
11943 ARM2THUMB_GLUE_SECTION_NAME))
11944 return FALSE;
11945
11946 if (! elf32_arm_output_glue_section (info, abfd,
11947 globals->bfd_of_glue_owner,
11948 THUMB2ARM_GLUE_SECTION_NAME))
11949 return FALSE;
11950
11951 if (! elf32_arm_output_glue_section (info, abfd,
11952 globals->bfd_of_glue_owner,
11953 VFP11_ERRATUM_VENEER_SECTION_NAME))
11954 return FALSE;
11955
11956 if (! elf32_arm_output_glue_section (info, abfd,
11957 globals->bfd_of_glue_owner,
11958 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
11959 return FALSE;
11960
11961 if (! elf32_arm_output_glue_section (info, abfd,
11962 globals->bfd_of_glue_owner,
11963 ARM_BX_GLUE_SECTION_NAME))
11964 return FALSE;
11965 }
11966
11967 return TRUE;
11968 }
11969
11970 /* Return a best guess for the machine number based on the attributes. */
11971
11972 static unsigned int
11973 bfd_arm_get_mach_from_attributes (bfd * abfd)
11974 {
11975 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
11976
11977 switch (arch)
11978 {
11979 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
11980 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
11981 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
11982
11983 case TAG_CPU_ARCH_V5TE:
11984 {
11985 char * name;
11986
11987 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
11988 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
11989
11990 if (name)
11991 {
11992 if (strcmp (name, "IWMMXT2") == 0)
11993 return bfd_mach_arm_iWMMXt2;
11994
11995 if (strcmp (name, "IWMMXT") == 0)
11996 return bfd_mach_arm_iWMMXt;
11997
11998 if (strcmp (name, "XSCALE") == 0)
11999 {
12000 int wmmx;
12001
12002 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12003 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12004 switch (wmmx)
12005 {
12006 case 1: return bfd_mach_arm_iWMMXt;
12007 case 2: return bfd_mach_arm_iWMMXt2;
12008 default: return bfd_mach_arm_XScale;
12009 }
12010 }
12011 }
12012
12013 return bfd_mach_arm_5TE;
12014 }
12015
12016 default:
12017 return bfd_mach_arm_unknown;
12018 }
12019 }
12020
12021 /* Set the right machine number. */
12022
12023 static bfd_boolean
12024 elf32_arm_object_p (bfd *abfd)
12025 {
12026 unsigned int mach;
12027
12028 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12029
12030 if (mach == bfd_mach_arm_unknown)
12031 {
12032 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12033 mach = bfd_mach_arm_ep9312;
12034 else
12035 mach = bfd_arm_get_mach_from_attributes (abfd);
12036 }
12037
12038 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12039 return TRUE;
12040 }
12041
12042 /* Function to keep ARM specific flags in the ELF header. */
12043
12044 static bfd_boolean
12045 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12046 {
12047 if (elf_flags_init (abfd)
12048 && elf_elfheader (abfd)->e_flags != flags)
12049 {
12050 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12051 {
12052 if (flags & EF_ARM_INTERWORK)
12053 (*_bfd_error_handler)
12054 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12055 abfd);
12056 else
12057 _bfd_error_handler
12058 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12059 abfd);
12060 }
12061 }
12062 else
12063 {
12064 elf_elfheader (abfd)->e_flags = flags;
12065 elf_flags_init (abfd) = TRUE;
12066 }
12067
12068 return TRUE;
12069 }
12070
12071 /* Copy backend specific data from one object module to another. */
12072
12073 static bfd_boolean
12074 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12075 {
12076 flagword in_flags;
12077 flagword out_flags;
12078
12079 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12080 return TRUE;
12081
12082 in_flags = elf_elfheader (ibfd)->e_flags;
12083 out_flags = elf_elfheader (obfd)->e_flags;
12084
12085 if (elf_flags_init (obfd)
12086 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12087 && in_flags != out_flags)
12088 {
12089 /* Cannot mix APCS26 and APCS32 code. */
12090 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12091 return FALSE;
12092
12093 /* Cannot mix float APCS and non-float APCS code. */
12094 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12095 return FALSE;
12096
12097 /* If the src and dest have different interworking flags
12098 then turn off the interworking bit. */
12099 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12100 {
12101 if (out_flags & EF_ARM_INTERWORK)
12102 _bfd_error_handler
12103 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12104 obfd, ibfd);
12105
12106 in_flags &= ~EF_ARM_INTERWORK;
12107 }
12108
12109 /* Likewise for PIC, though don't warn for this case. */
12110 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12111 in_flags &= ~EF_ARM_PIC;
12112 }
12113
12114 elf_elfheader (obfd)->e_flags = in_flags;
12115 elf_flags_init (obfd) = TRUE;
12116
12117 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12118 }
12119
/* Values for Tag_ABI_PCS_R9_use.  Enumerator order follows the AEABI
   build-attribute value encoding (0, 1, 2, ...).  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary variable register (V6).  */
  AEABI_R9_SB,		/* R9 used as the static base (SB).  */
  AEABI_R9_TLS,		/* R9 used as the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 not used by the code.  */
};

/* Values for Tag_ABI_PCS_RW_data.  Enumerator order follows the AEABI
   build-attribute value encoding.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW data used.  */
};

/* Values for Tag_ABI_enum_size.  Enumerator order follows the AEABI
   build-attribute value encoding.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums packed into smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums are 32 bits, ABI-visible.  */
};
12146
12147 /* Determine whether an object attribute tag takes an integer, a
12148 string or both. */
12149
12150 static int
12151 elf32_arm_obj_attrs_arg_type (int tag)
12152 {
12153 if (tag == Tag_compatibility)
12154 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12155 else if (tag == Tag_nodefaults)
12156 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12157 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12158 return ATTR_TYPE_FLAG_STR_VAL;
12159 else if (tag < 32)
12160 return ATTR_TYPE_FLAG_INT_VAL;
12161 else
12162 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12163 }
12164
12165 /* The ABI defines that Tag_conformance should be emitted first, and that
12166 Tag_nodefaults should be second (if either is defined). This sets those
12167 two positions, and bumps up the position of all the remaining tags to
12168 compensate. */
12169 static int
12170 elf32_arm_obj_attrs_order (int num)
12171 {
12172 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12173 return Tag_conformance;
12174 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12175 return Tag_nodefaults;
12176 if ((num - 2) < Tag_nodefaults)
12177 return num - 2;
12178 if ((num - 1) < Tag_conformance)
12179 return num - 1;
12180 return num;
12181 }
12182
12183 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12184 static bfd_boolean
12185 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12186 {
12187 if ((tag & 127) < 64)
12188 {
12189 _bfd_error_handler
12190 (_("%B: Unknown mandatory EABI object attribute %d"),
12191 abfd, tag);
12192 bfd_set_error (bfd_error_bad_value);
12193 return FALSE;
12194 }
12195 else
12196 {
12197 _bfd_error_handler
12198 (_("Warning: %B: Unknown EABI object attribute %d"),
12199 abfd, tag);
12200 return TRUE;
12201 }
12202 }
12203
12204 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12205 Returns -1 if no architecture could be read. */
12206
12207 static int
12208 get_secondary_compatible_arch (bfd *abfd)
12209 {
12210 obj_attribute *attr =
12211 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12212
12213 /* Note: the tag and its argument below are uleb128 values, though
12214 currently-defined values fit in one byte for each. */
12215 if (attr->s
12216 && attr->s[0] == Tag_CPU_arch
12217 && (attr->s[1] & 128) != 128
12218 && attr->s[2] == 0)
12219 return attr->s[1];
12220
12221 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12222 return -1;
12223 }
12224
12225 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12226 The tag is removed if ARCH is -1. */
12227
12228 static void
12229 set_secondary_compatible_arch (bfd *abfd, int arch)
12230 {
12231 obj_attribute *attr =
12232 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12233
12234 if (arch == -1)
12235 {
12236 attr->s = NULL;
12237 return;
12238 }
12239
12240 /* Note: the tag and its argument below are uleb128 values, though
12241 currently-defined values fit in one byte for each. */
12242 if (!attr->s)
12243 attr->s = (char *) bfd_alloc (abfd, 3);
12244 attr->s[0] = Tag_CPU_arch;
12245 attr->s[1] = arch;
12246 attr->s[2] = '\0';
12247 }
12248
12249 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12250 into account. */
12251
static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each table below describes merging against one particular "high"
     architecture: it is indexed by the lower of the two Tag_CPU_arch
     values and yields the combined tag.  An entry of -1 marks an
     incompatible pair of architectures.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  /* Pseudo-architecture representing "v4T plus also-compatible-with
     v6-M"; see the secondary-compatibility handling below.  */
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* Unused.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Indexed by (higher tag) - T(V6T2); the NULL entry corresponds to
     the unused tag value between V8 and V8-M baseline.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      NULL,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  /* Look up the merge in the table for the higher architecture,
     indexed by the lower one.  A NULL table entry means the higher
     tag has no defined merges at all.  */
  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
12492
12493 /* Query attributes object to see if integer divide instructions may be
12494 present in an object. */
12495 static bfd_boolean
12496 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12497 {
12498 int arch = attr[Tag_CPU_arch].i;
12499 int profile = attr[Tag_CPU_arch_profile].i;
12500
12501 switch (attr[Tag_DIV_use].i)
12502 {
12503 case 0:
12504 /* Integer divide allowed if instruction contained in archetecture. */
12505 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12506 return TRUE;
12507 else if (arch >= TAG_CPU_ARCH_V7E_M)
12508 return TRUE;
12509 else
12510 return FALSE;
12511
12512 case 1:
12513 /* Integer divide explicitly prohibited. */
12514 return FALSE;
12515
12516 default:
12517 /* Unrecognised case - treat as allowing divide everywhere. */
12518 case 2:
12519 /* Integer divide allowed in ARM state. */
12520 return TRUE;
12521 }
12522 }
12523
12524 /* Query attributes object to see if integer divide instructions are
12525 forbidden to be in the object. This is not the inverse of
12526 elf32_arm_attributes_accept_div. */
12527 static bfd_boolean
12528 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12529 {
12530 return attr[Tag_DIV_use].i == 1;
12531 }
12532
12533 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12534 are conflicting attributes. */
12535
12536 static bfd_boolean
12537 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12538 {
12539 obj_attribute *in_attr;
12540 obj_attribute *out_attr;
12541 /* Some tags have 0 = don't care, 1 = strong requirement,
12542 2 = weak requirement. */
12543 static const int order_021[3] = {0, 2, 1};
12544 int i;
12545 bfd_boolean result = TRUE;
12546 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12547
12548 /* Skip the linker stubs file. This preserves previous behavior
12549 of accepting unknown attributes in the first input file - but
12550 is that a bug? */
12551 if (ibfd->flags & BFD_LINKER_CREATED)
12552 return TRUE;
12553
12554 /* Skip any input that hasn't attribute section.
12555 This enables to link object files without attribute section with
12556 any others. */
12557 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12558 return TRUE;
12559
12560 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12561 {
12562 /* This is the first object. Copy the attributes. */
12563 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12564
12565 out_attr = elf_known_obj_attributes_proc (obfd);
12566
12567 /* Use the Tag_null value to indicate the attributes have been
12568 initialized. */
12569 out_attr[0].i = 1;
12570
12571 /* We do not output objects with Tag_MPextension_use_legacy - we move
12572 the attribute's value to Tag_MPextension_use. */
12573 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12574 {
12575 if (out_attr[Tag_MPextension_use].i != 0
12576 && out_attr[Tag_MPextension_use_legacy].i
12577 != out_attr[Tag_MPextension_use].i)
12578 {
12579 _bfd_error_handler
12580 (_("Error: %B has both the current and legacy "
12581 "Tag_MPextension_use attributes"), ibfd);
12582 result = FALSE;
12583 }
12584
12585 out_attr[Tag_MPextension_use] =
12586 out_attr[Tag_MPextension_use_legacy];
12587 out_attr[Tag_MPextension_use_legacy].type = 0;
12588 out_attr[Tag_MPextension_use_legacy].i = 0;
12589 }
12590
12591 return result;
12592 }
12593
12594 in_attr = elf_known_obj_attributes_proc (ibfd);
12595 out_attr = elf_known_obj_attributes_proc (obfd);
12596 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12597 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12598 {
12599 /* Ignore mismatches if the object doesn't use floating point or is
12600 floating point ABI independent. */
12601 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12602 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12603 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12604 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12605 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12606 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12607 {
12608 _bfd_error_handler
12609 (_("error: %B uses VFP register arguments, %B does not"),
12610 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12611 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12612 result = FALSE;
12613 }
12614 }
12615
12616 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12617 {
12618 /* Merge this attribute with existing attributes. */
12619 switch (i)
12620 {
12621 case Tag_CPU_raw_name:
12622 case Tag_CPU_name:
12623 /* These are merged after Tag_CPU_arch. */
12624 break;
12625
12626 case Tag_ABI_optimization_goals:
12627 case Tag_ABI_FP_optimization_goals:
12628 /* Use the first value seen. */
12629 break;
12630
12631 case Tag_CPU_arch:
12632 {
12633 int secondary_compat = -1, secondary_compat_out = -1;
12634 unsigned int saved_out_attr = out_attr[i].i;
12635 int arch_attr;
12636 static const char *name_table[] =
12637 {
12638 /* These aren't real CPU names, but we can't guess
12639 that from the architecture version alone. */
12640 "Pre v4",
12641 "ARM v4",
12642 "ARM v4T",
12643 "ARM v5T",
12644 "ARM v5TE",
12645 "ARM v5TEJ",
12646 "ARM v6",
12647 "ARM v6KZ",
12648 "ARM v6T2",
12649 "ARM v6K",
12650 "ARM v7",
12651 "ARM v6-M",
12652 "ARM v6S-M",
12653 "ARM v8",
12654 "",
12655 "ARM v8-M.baseline",
12656 "ARM v8-M.mainline",
12657 };
12658
12659 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12660 secondary_compat = get_secondary_compatible_arch (ibfd);
12661 secondary_compat_out = get_secondary_compatible_arch (obfd);
12662 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12663 &secondary_compat_out,
12664 in_attr[i].i,
12665 secondary_compat);
12666
12667 /* Return with error if failed to merge. */
12668 if (arch_attr == -1)
12669 return FALSE;
12670
12671 out_attr[i].i = arch_attr;
12672
12673 set_secondary_compatible_arch (obfd, secondary_compat_out);
12674
12675 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12676 if (out_attr[i].i == saved_out_attr)
12677 ; /* Leave the names alone. */
12678 else if (out_attr[i].i == in_attr[i].i)
12679 {
12680 /* The output architecture has been changed to match the
12681 input architecture. Use the input names. */
12682 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12683 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12684 : NULL;
12685 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12686 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12687 : NULL;
12688 }
12689 else
12690 {
12691 out_attr[Tag_CPU_name].s = NULL;
12692 out_attr[Tag_CPU_raw_name].s = NULL;
12693 }
12694
12695 /* If we still don't have a value for Tag_CPU_name,
12696 make one up now. Tag_CPU_raw_name remains blank. */
12697 if (out_attr[Tag_CPU_name].s == NULL
12698 && out_attr[i].i < ARRAY_SIZE (name_table))
12699 out_attr[Tag_CPU_name].s =
12700 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12701 }
12702 break;
12703
12704 case Tag_ARM_ISA_use:
12705 case Tag_THUMB_ISA_use:
12706 case Tag_WMMX_arch:
12707 case Tag_Advanced_SIMD_arch:
12708 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12709 case Tag_ABI_FP_rounding:
12710 case Tag_ABI_FP_exceptions:
12711 case Tag_ABI_FP_user_exceptions:
12712 case Tag_ABI_FP_number_model:
12713 case Tag_FP_HP_extension:
12714 case Tag_CPU_unaligned_access:
12715 case Tag_T2EE_use:
12716 case Tag_MPextension_use:
12717 /* Use the largest value specified. */
12718 if (in_attr[i].i > out_attr[i].i)
12719 out_attr[i].i = in_attr[i].i;
12720 break;
12721
12722 case Tag_ABI_align_preserved:
12723 case Tag_ABI_PCS_RO_data:
12724 /* Use the smallest value specified. */
12725 if (in_attr[i].i < out_attr[i].i)
12726 out_attr[i].i = in_attr[i].i;
12727 break;
12728
12729 case Tag_ABI_align_needed:
12730 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12731 && (in_attr[Tag_ABI_align_preserved].i == 0
12732 || out_attr[Tag_ABI_align_preserved].i == 0))
12733 {
12734 /* This error message should be enabled once all non-conformant
12735 binaries in the toolchain have had the attributes set
12736 properly.
12737 _bfd_error_handler
12738 (_("error: %B: 8-byte data alignment conflicts with %B"),
12739 obfd, ibfd);
12740 result = FALSE; */
12741 }
12742 /* Fall through. */
12743 case Tag_ABI_FP_denormal:
12744 case Tag_ABI_PCS_GOT_use:
12745 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12746 value if greater than 2 (for future-proofing). */
12747 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12748 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12749 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12750 out_attr[i].i = in_attr[i].i;
12751 break;
12752
12753 case Tag_Virtualization_use:
12754 /* The virtualization tag effectively stores two bits of
12755 information: the intended use of TrustZone (in bit 0), and the
12756 intended use of Virtualization (in bit 1). */
12757 if (out_attr[i].i == 0)
12758 out_attr[i].i = in_attr[i].i;
12759 else if (in_attr[i].i != 0
12760 && in_attr[i].i != out_attr[i].i)
12761 {
12762 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12763 out_attr[i].i = 3;
12764 else
12765 {
12766 _bfd_error_handler
12767 (_("error: %B: unable to merge virtualization attributes "
12768 "with %B"),
12769 obfd, ibfd);
12770 result = FALSE;
12771 }
12772 }
12773 break;
12774
12775 case Tag_CPU_arch_profile:
12776 if (out_attr[i].i != in_attr[i].i)
12777 {
12778 /* 0 will merge with anything.
12779 'A' and 'S' merge to 'A'.
12780 'R' and 'S' merge to 'R'.
12781 'M' and 'A|R|S' is an error. */
12782 if (out_attr[i].i == 0
12783 || (out_attr[i].i == 'S'
12784 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12785 out_attr[i].i = in_attr[i].i;
12786 else if (in_attr[i].i == 0
12787 || (in_attr[i].i == 'S'
12788 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12789 ; /* Do nothing. */
12790 else
12791 {
12792 _bfd_error_handler
12793 (_("error: %B: Conflicting architecture profiles %c/%c"),
12794 ibfd,
12795 in_attr[i].i ? in_attr[i].i : '0',
12796 out_attr[i].i ? out_attr[i].i : '0');
12797 result = FALSE;
12798 }
12799 }
12800 break;
12801
12802 case Tag_DSP_extension:
12803 /* No need to change output value if any of:
12804 - pre (<=) ARMv5T input architecture (do not have DSP)
12805 - M input profile not ARMv7E-M and do not have DSP. */
12806 if (in_attr[Tag_CPU_arch].i <= 3
12807 || (in_attr[Tag_CPU_arch_profile].i == 'M'
12808 && in_attr[Tag_CPU_arch].i != 13
12809 && in_attr[i].i == 0))
12810 ; /* Do nothing. */
12811 /* Output value should be 0 if DSP part of architecture, ie.
12812 - post (>=) ARMv5te architecture output
12813 - A, R or S profile output or ARMv7E-M output architecture. */
12814 else if (out_attr[Tag_CPU_arch].i >= 4
12815 && (out_attr[Tag_CPU_arch_profile].i == 'A'
12816 || out_attr[Tag_CPU_arch_profile].i == 'R'
12817 || out_attr[Tag_CPU_arch_profile].i == 'S'
12818 || out_attr[Tag_CPU_arch].i == 13))
12819 out_attr[i].i = 0;
12820 /* Otherwise, DSP instructions are added and not part of output
12821 architecture. */
12822 else
12823 out_attr[i].i = 1;
12824 break;
12825
12826 case Tag_FP_arch:
12827 {
12828 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12829 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12830 when it's 0. It might mean absence of FP hardware if
12831 Tag_FP_arch is zero. */
12832
12833 #define VFP_VERSION_COUNT 9
12834 static const struct
12835 {
12836 int ver;
12837 int regs;
12838 } vfp_versions[VFP_VERSION_COUNT] =
12839 {
12840 {0, 0},
12841 {1, 16},
12842 {2, 16},
12843 {3, 32},
12844 {3, 16},
12845 {4, 32},
12846 {4, 16},
12847 {8, 32},
12848 {8, 16}
12849 };
12850 int ver;
12851 int regs;
12852 int newval;
12853
12854 /* If the output has no requirement about FP hardware,
12855 follow the requirement of the input. */
12856 if (out_attr[i].i == 0)
12857 {
12858 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
12859 out_attr[i].i = in_attr[i].i;
12860 out_attr[Tag_ABI_HardFP_use].i
12861 = in_attr[Tag_ABI_HardFP_use].i;
12862 break;
12863 }
12864 /* If the input has no requirement about FP hardware, do
12865 nothing. */
12866 else if (in_attr[i].i == 0)
12867 {
12868 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
12869 break;
12870 }
12871
12872 /* Both the input and the output have nonzero Tag_FP_arch.
12873 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12874
12875 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12876 do nothing. */
12877 if (in_attr[Tag_ABI_HardFP_use].i == 0
12878 && out_attr[Tag_ABI_HardFP_use].i == 0)
12879 ;
12880 /* If the input and the output have different Tag_ABI_HardFP_use,
12881 the combination of them is 0 (implied by Tag_FP_arch). */
12882 else if (in_attr[Tag_ABI_HardFP_use].i
12883 != out_attr[Tag_ABI_HardFP_use].i)
12884 out_attr[Tag_ABI_HardFP_use].i = 0;
12885
12886 /* Now we can handle Tag_FP_arch. */
12887
12888 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12889 pick the biggest. */
12890 if (in_attr[i].i >= VFP_VERSION_COUNT
12891 && in_attr[i].i > out_attr[i].i)
12892 {
12893 out_attr[i] = in_attr[i];
12894 break;
12895 }
12896 /* The output uses the superset of input features
12897 (ISA version) and registers. */
12898 ver = vfp_versions[in_attr[i].i].ver;
12899 if (ver < vfp_versions[out_attr[i].i].ver)
12900 ver = vfp_versions[out_attr[i].i].ver;
12901 regs = vfp_versions[in_attr[i].i].regs;
12902 if (regs < vfp_versions[out_attr[i].i].regs)
12903 regs = vfp_versions[out_attr[i].i].regs;
12904 /* This assumes all possible supersets are also a valid
12905 options. */
12906 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
12907 {
12908 if (regs == vfp_versions[newval].regs
12909 && ver == vfp_versions[newval].ver)
12910 break;
12911 }
12912 out_attr[i].i = newval;
12913 }
12914 break;
12915 case Tag_PCS_config:
12916 if (out_attr[i].i == 0)
12917 out_attr[i].i = in_attr[i].i;
12918 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
12919 {
12920 /* It's sometimes ok to mix different configs, so this is only
12921 a warning. */
12922 _bfd_error_handler
12923 (_("Warning: %B: Conflicting platform configuration"), ibfd);
12924 }
12925 break;
12926 case Tag_ABI_PCS_R9_use:
12927 if (in_attr[i].i != out_attr[i].i
12928 && out_attr[i].i != AEABI_R9_unused
12929 && in_attr[i].i != AEABI_R9_unused)
12930 {
12931 _bfd_error_handler
12932 (_("error: %B: Conflicting use of R9"), ibfd);
12933 result = FALSE;
12934 }
12935 if (out_attr[i].i == AEABI_R9_unused)
12936 out_attr[i].i = in_attr[i].i;
12937 break;
12938 case Tag_ABI_PCS_RW_data:
12939 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
12940 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
12941 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
12942 {
12943 _bfd_error_handler
12944 (_("error: %B: SB relative addressing conflicts with use of R9"),
12945 ibfd);
12946 result = FALSE;
12947 }
12948 /* Use the smallest value specified. */
12949 if (in_attr[i].i < out_attr[i].i)
12950 out_attr[i].i = in_attr[i].i;
12951 break;
12952 case Tag_ABI_PCS_wchar_t:
12953 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12954 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12955 {
12956 _bfd_error_handler
12957 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12958 ibfd, in_attr[i].i, out_attr[i].i);
12959 }
12960 else if (in_attr[i].i && !out_attr[i].i)
12961 out_attr[i].i = in_attr[i].i;
12962 break;
12963 case Tag_ABI_enum_size:
12964 if (in_attr[i].i != AEABI_enum_unused)
12965 {
12966 if (out_attr[i].i == AEABI_enum_unused
12967 || out_attr[i].i == AEABI_enum_forced_wide)
12968 {
12969 /* The existing object is compatible with anything.
12970 Use whatever requirements the new object has. */
12971 out_attr[i].i = in_attr[i].i;
12972 }
12973 else if (in_attr[i].i != AEABI_enum_forced_wide
12974 && out_attr[i].i != in_attr[i].i
12975 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12976 {
12977 static const char *aeabi_enum_names[] =
12978 { "", "variable-size", "32-bit", "" };
12979 const char *in_name =
12980 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12981 ? aeabi_enum_names[in_attr[i].i]
12982 : "<unknown>";
12983 const char *out_name =
12984 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12985 ? aeabi_enum_names[out_attr[i].i]
12986 : "<unknown>";
12987 _bfd_error_handler
12988 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12989 ibfd, in_name, out_name);
12990 }
12991 }
12992 break;
12993 case Tag_ABI_VFP_args:
12994 /* Aready done. */
12995 break;
12996 case Tag_ABI_WMMX_args:
12997 if (in_attr[i].i != out_attr[i].i)
12998 {
12999 _bfd_error_handler
13000 (_("error: %B uses iWMMXt register arguments, %B does not"),
13001 ibfd, obfd);
13002 result = FALSE;
13003 }
13004 break;
13005 case Tag_compatibility:
13006 /* Merged in target-independent code. */
13007 break;
13008 case Tag_ABI_HardFP_use:
13009 /* This is handled along with Tag_FP_arch. */
13010 break;
13011 case Tag_ABI_FP_16bit_format:
13012 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13013 {
13014 if (in_attr[i].i != out_attr[i].i)
13015 {
13016 _bfd_error_handler
13017 (_("error: fp16 format mismatch between %B and %B"),
13018 ibfd, obfd);
13019 result = FALSE;
13020 }
13021 }
13022 if (in_attr[i].i != 0)
13023 out_attr[i].i = in_attr[i].i;
13024 break;
13025
13026 case Tag_DIV_use:
13027 /* A value of zero on input means that the divide instruction may
13028 be used if available in the base architecture as specified via
13029 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13030 the user did not want divide instructions. A value of 2
13031 explicitly means that divide instructions were allowed in ARM
13032 and Thumb state. */
13033 if (in_attr[i].i == out_attr[i].i)
13034 /* Do nothing. */ ;
13035 else if (elf32_arm_attributes_forbid_div (in_attr)
13036 && !elf32_arm_attributes_accept_div (out_attr))
13037 out_attr[i].i = 1;
13038 else if (elf32_arm_attributes_forbid_div (out_attr)
13039 && elf32_arm_attributes_accept_div (in_attr))
13040 out_attr[i].i = in_attr[i].i;
13041 else if (in_attr[i].i == 2)
13042 out_attr[i].i = in_attr[i].i;
13043 break;
13044
13045 case Tag_MPextension_use_legacy:
13046 /* We don't output objects with Tag_MPextension_use_legacy - we
13047 move the value to Tag_MPextension_use. */
13048 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13049 {
13050 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13051 {
13052 _bfd_error_handler
13053 (_("%B has has both the current and legacy "
13054 "Tag_MPextension_use attributes"),
13055 ibfd);
13056 result = FALSE;
13057 }
13058 }
13059
13060 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13061 out_attr[Tag_MPextension_use] = in_attr[i];
13062
13063 break;
13064
13065 case Tag_nodefaults:
13066 /* This tag is set if it exists, but the value is unused (and is
13067 typically zero). We don't actually need to do anything here -
13068 the merge happens automatically when the type flags are merged
13069 below. */
13070 break;
13071 case Tag_also_compatible_with:
13072 /* Already done in Tag_CPU_arch. */
13073 break;
13074 case Tag_conformance:
13075 /* Keep the attribute if it matches. Throw it away otherwise.
13076 No attribute means no claim to conform. */
13077 if (!in_attr[i].s || !out_attr[i].s
13078 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13079 out_attr[i].s = NULL;
13080 break;
13081
13082 default:
13083 result
13084 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13085 }
13086
13087 /* If out_attr was copied from in_attr then it won't have a type yet. */
13088 if (in_attr[i].type && !out_attr[i].type)
13089 out_attr[i].type = in_attr[i].type;
13090 }
13091
13092 /* Merge Tag_compatibility attributes and any common GNU ones. */
13093 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13094 return FALSE;
13095
13096 /* Check for any attributes not known on ARM. */
13097 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13098
13099 return result;
13100 }
13101
13102
13103 /* Return TRUE if the two EABI versions are incompatible. */
13104
13105 static bfd_boolean
13106 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13107 {
13108 /* v4 and v5 are the same spec before and after it was released,
13109 so allow mixing them. */
13110 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13111 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13112 return TRUE;
13113
13114 return (iver == over);
13115 }
13116
13117 /* Merge backend specific data from an object file to the output
13118 object file when linking. */
13119
13120 static bfd_boolean
13121 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13122
13123 /* Display the flags field. */
13124
static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the bits it has decoded out of FLAGS, so
     that any bits still set at the end can be reported as
     unrecognised.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      /* No v3-specific flag bits are decoded here.  */
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      /* v4 shares the BE8/LE8 decoding with v5 via the "eabi" label.  */
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* EF_ARM_RELEXEC is meaningful for every EABI version, so it is
     decoded outside the switch.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  flags &= ~EF_ARM_RELEXEC;

  /* Anything still set was not recognised above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
13262
13263 static int
13264 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13265 {
13266 switch (ELF_ST_TYPE (elf_sym->st_info))
13267 {
13268 case STT_ARM_TFUNC:
13269 return ELF_ST_TYPE (elf_sym->st_info);
13270
13271 case STT_ARM_16BIT:
13272 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13273 This allows us to distinguish between data used by Thumb instructions
13274 and non-data (which is probably code) inside Thumb regions of an
13275 executable. */
13276 if (type != STT_OBJECT && type != STT_TLS)
13277 return ELF_ST_TYPE (elf_sym->st_info);
13278 break;
13279
13280 default:
13281 break;
13282 }
13283
13284 return type;
13285 }
13286
13287 static asection *
13288 elf32_arm_gc_mark_hook (asection *sec,
13289 struct bfd_link_info *info,
13290 Elf_Internal_Rela *rel,
13291 struct elf_link_hash_entry *h,
13292 Elf_Internal_Sym *sym)
13293 {
13294 if (h != NULL)
13295 switch (ELF32_R_TYPE (rel->r_info))
13296 {
13297 case R_ARM_GNU_VTINHERIT:
13298 case R_ARM_GNU_VTENTRY:
13299 return NULL;
13300 }
13301
13302 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13303 }
13304
13305 /* Update the got entry reference counts for the section being removed. */
13306
static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* Nothing to sweep for a relocatable (-r) link: relocs are copied,
     not resolved, so no GOT/PLT entries were counted.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  /* SEC is being discarded, so forget any dynamic relocs that were
     recorded against it.  */
  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  /* Walk every relocation in SEC and undo the reference counting that
     elf32_arm_check_relocs performed for it.  */
  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      struct elf32_arm_link_hash_entry *eh;
      int r_type;
      bfd_boolean call_reloc_p;
      bfd_boolean may_become_dynamic_p;
      bfd_boolean may_need_local_target_p;
      union gotplt_union *root_plt;
      struct arm_plt_info *arm_plt;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: follow indirection/warning links to the
	     real hash entry, mirroring check_relocs.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}
      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Drop one GOT reference, never going below zero.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  /* The LDM GOT slot is shared; a single per-link refcount.  */
	  globals->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* On non-VxWorks targets ABS12 behaves like a plain branch
	     target reference; on VxWorks it falls into the dynamic
	     reloc handling below.  */
	  if (!globals->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */
	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* Local PC-relative references were counted as
		     calls by check_relocs; undo that.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	default:
	  break;
	}

      if (may_need_local_target_p
	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
	{
	  /* If PLT refcount book-keeping is wrong and too low, we'll
	     see a zero value (going to -1) for the root PLT reference
	     count.  */
	  if (root_plt->refcount >= 0)
	    {
	      BFD_ASSERT (root_plt->refcount != 0);
	      root_plt->refcount -= 1;
	    }
	  else
	    /* A value of -1 means the symbol has become local, forced
	       or seeing a hidden definition.  Any other negative value
	       is an error.  */
	    BFD_ASSERT (root_plt->refcount == -1);

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount--;

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount--;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount--;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Find the dynamic reloc list this reference was recorded
	     on: the hash entry for globals, the per-local list for
	     local symbols.  */
	  if (h != NULL)
	    pp = &(eh->dyn_relocs);
	  else
	    {
	      Elf_Internal_Sym *isym;

	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (pp == NULL)
		return FALSE;
	    }
	  for (; (p = *pp) != NULL; pp = &p->next)
	    if (p->sec == sec)
	      {
		/* Everything must go for SEC.  */
		*pp = p->next;
		break;
	      }
	}
    }

  return TRUE;
}
13496
13497 /* Look through the relocs for a section during the first phase. */
13498
static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* For a relocatable (-r) link the relocs are simply copied through,
     so no GOT/PLT/dynamic-reloc accounting is needed.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      /* Resolve the reloc's symbol: ISYM for locals, H for globals
	 (following indirect/warning links).  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;

	      /* PR15323, ref flags aren't set for references in the
		 same object.  */
	      h->root.non_ir_ref = 1;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    /* Classify the kind of GOT slot the reloc needs.  */
	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    /* IE accesses from a shared object require a static TLS
	       block at load time.  */
	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocs need the .got section to exist,
	     even if they don't need a slot of their own.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  /* VxWorks: skip the PIC diagnostic below, which only applies
	     to the MOVW/MOVT relocs, and go straight to the shared
	     ABS32-style handling.  */
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be represented by dynamic
	     relocations, so they are forbidden in shared objects.  */
	  if (bfd_link_pic (info))
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* Record PLT-related reference counts; for local symbols this
	 only matters for STT_GNU_IFUNC, which gets an .iplt entry.  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_get_section_flags (dynobj, sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (dynobj, sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* The list is kept with the most recently used section at
	     the head; start a new entry if SEC isn't there.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	}
    }

  return TRUE;
}
13904
13905 /* Unwinding tables are not referenced directly. This pass marks them as
13906 required if the corresponding code section is marked. */
13907
static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  bfd_boolean again;

  /* Let the generic code mark everything it can first.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      /* An unmarked .ARM.exidx section whose sh_link points at
		 a marked code section must be kept too: mark it and
		 schedule another pass, since marking it may in turn
		 mark more sections.  */
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }
	}
    }

  return TRUE;
}
13953
13954 /* Treat mapping symbols as special target symbols. */
13955
13956 static bfd_boolean
13957 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
13958 {
13959 return bfd_is_arm_special_symbol_name (sym->name,
13960 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
13961 }
13962
13963 /* This is a copy of elf_find_function() from elf.c except that
13964 ARM mapping symbols are ignored when looking for function names
13965 and STT_ARM_TFUNC is considered to a function type. */
13966
static bfd_boolean
arm_elf_find_function (bfd *	     abfd ATTRIBUTE_UNUSED,
		       asymbol **    symbols,
		       asection *    section,
		       bfd_vma	     offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  /* Scan all symbols, remembering the most recent STT_FILE name and
     the closest suitable symbol at or below OFFSET in SECTION.  */
  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
						 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Keep the highest-addressed candidate that does not lie
	     beyond OFFSET.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  /* Either output pointer may be NULL when the caller doesn't care.  */
  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
14023
14024
14025 /* Find the nearest line to a particular section and offset, for error
14026 reporting. This code is a duplicate of the code in elf.c, except
14027 that it uses arm_elf_find_function. */
14028
static bfd_boolean
elf32_arm_find_nearest_line (bfd *	    abfd,
			     asymbol **	    symbols,
			     asection *	    section,
			     bfd_vma	    offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* Try DWARF2 first; it gives the most accurate answer.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF found the line but possibly not the enclosing function;
	 fill in the function name (and file name, if still missing)
	 from the symbol table, skipping mapping symbols.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Fall back to stabs debugging information.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  /* Last resort: derive just the function name from the symbol table.  */
  if (symbols == NULL)
    return FALSE;

  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
14077
14078 static bfd_boolean
14079 elf32_arm_find_inliner_info (bfd * abfd,
14080 const char ** filename_ptr,
14081 const char ** functionname_ptr,
14082 unsigned int * line_ptr)
14083 {
14084 bfd_boolean found;
14085 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14086 functionname_ptr, line_ptr,
14087 & elf_tdata (abfd)->dwarf2_find_line_info);
14088 return found;
14089 }
14090
14091 /* Adjust a symbol defined by a dynamic object and referenced by a
14092 regular object. The current definition is in some section of the
14093 dynamic object, but we're not including those sections. We have to
14094 change the definition to something the rest of the link can
14095 understand. */
14096
static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_linker_section (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      asection *srel;

      srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Reserve space for the symbol in .dynbss and finish the adjustment.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
14221
14222 /* Allocate space in .plt, .got and associated reloc sections for
14223 dynamic relocs. */
14224
14225 static bfd_boolean
14226 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
14227 {
14228 struct bfd_link_info *info;
14229 struct elf32_arm_link_hash_table *htab;
14230 struct elf32_arm_link_hash_entry *eh;
14231 struct elf_dyn_relocs *p;
14232
14233 if (h->root.type == bfd_link_hash_indirect)
14234 return TRUE;
14235
14236 eh = (struct elf32_arm_link_hash_entry *) h;
14237
14238 info = (struct bfd_link_info *) inf;
14239 htab = elf32_arm_hash_table (info);
14240 if (htab == NULL)
14241 return FALSE;
14242
14243 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
14244 && h->plt.refcount > 0)
14245 {
14246 /* Make sure this symbol is output as a dynamic symbol.
14247 Undefined weak syms won't yet be marked as dynamic. */
14248 if (h->dynindx == -1
14249 && !h->forced_local)
14250 {
14251 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14252 return FALSE;
14253 }
14254
14255 /* If the call in the PLT entry binds locally, the associated
14256 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14257 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14258 than the .plt section. */
14259 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14260 {
14261 eh->is_iplt = 1;
14262 if (eh->plt.noncall_refcount == 0
14263 && SYMBOL_REFERENCES_LOCAL (info, h))
14264 /* All non-call references can be resolved directly.
14265 This means that they can (and in some cases, must)
14266 resolve directly to the run-time target, rather than
14267 to the PLT. That in turns means that any .got entry
14268 would be equal to the .igot.plt entry, so there's
14269 no point having both. */
14270 h->got.refcount = 0;
14271 }
14272
14273 if (bfd_link_pic (info)
14274 || eh->is_iplt
14275 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14276 {
14277 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14278
14279 /* If this symbol is not defined in a regular file, and we are
14280 not generating a shared library, then set the symbol to this
14281 location in the .plt. This is required to make function
14282 pointers compare as equal between the normal executable and
14283 the shared library. */
14284 if (! bfd_link_pic (info)
14285 && !h->def_regular)
14286 {
14287 h->root.u.def.section = htab->root.splt;
14288 h->root.u.def.value = h->plt.offset;
14289
14290 /* Make sure the function is not marked as Thumb, in case
14291 it is the target of an ABS32 relocation, which will
14292 point to the PLT entry. */
14293 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14294 }
14295
14296 /* VxWorks executables have a second set of relocations for
14297 each PLT entry. They go in a separate relocation section,
14298 which is processed by the kernel loader. */
14299 if (htab->vxworks_p && !bfd_link_pic (info))
14300 {
14301 /* There is a relocation for the initial PLT entry:
14302 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14303 if (h->plt.offset == htab->plt_header_size)
14304 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14305
14306 /* There are two extra relocations for each subsequent
14307 PLT entry: an R_ARM_32 relocation for the GOT entry,
14308 and an R_ARM_32 relocation for the PLT entry. */
14309 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14310 }
14311 }
14312 else
14313 {
14314 h->plt.offset = (bfd_vma) -1;
14315 h->needs_plt = 0;
14316 }
14317 }
14318 else
14319 {
14320 h->plt.offset = (bfd_vma) -1;
14321 h->needs_plt = 0;
14322 }
14323
14324 eh = (struct elf32_arm_link_hash_entry *) h;
14325 eh->tlsdesc_got = (bfd_vma) -1;
14326
14327 if (h->got.refcount > 0)
14328 {
14329 asection *s;
14330 bfd_boolean dyn;
14331 int tls_type = elf32_arm_hash_entry (h)->tls_type;
14332 int indx;
14333
14334 /* Make sure this symbol is output as a dynamic symbol.
14335 Undefined weak syms won't yet be marked as dynamic. */
14336 if (h->dynindx == -1
14337 && !h->forced_local)
14338 {
14339 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14340 return FALSE;
14341 }
14342
14343 if (!htab->symbian_p)
14344 {
14345 s = htab->root.sgot;
14346 h->got.offset = s->size;
14347
14348 if (tls_type == GOT_UNKNOWN)
14349 abort ();
14350
14351 if (tls_type == GOT_NORMAL)
14352 /* Non-TLS symbols need one GOT slot. */
14353 s->size += 4;
14354 else
14355 {
14356 if (tls_type & GOT_TLS_GDESC)
14357 {
14358 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14359 eh->tlsdesc_got
14360 = (htab->root.sgotplt->size
14361 - elf32_arm_compute_jump_table_size (htab));
14362 htab->root.sgotplt->size += 8;
14363 h->got.offset = (bfd_vma) -2;
14364 /* plt.got_offset needs to know there's a TLS_DESC
14365 reloc in the middle of .got.plt. */
14366 htab->num_tls_desc++;
14367 }
14368
14369 if (tls_type & GOT_TLS_GD)
14370 {
14371 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14372 the symbol is both GD and GDESC, got.offset may
14373 have been overwritten. */
14374 h->got.offset = s->size;
14375 s->size += 8;
14376 }
14377
14378 if (tls_type & GOT_TLS_IE)
14379 /* R_ARM_TLS_IE32 needs one GOT slot. */
14380 s->size += 4;
14381 }
14382
14383 dyn = htab->root.dynamic_sections_created;
14384
14385 indx = 0;
14386 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14387 bfd_link_pic (info),
14388 h)
14389 && (!bfd_link_pic (info)
14390 || !SYMBOL_REFERENCES_LOCAL (info, h)))
14391 indx = h->dynindx;
14392
14393 if (tls_type != GOT_NORMAL
14394 && (bfd_link_pic (info) || indx != 0)
14395 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14396 || h->root.type != bfd_link_hash_undefweak))
14397 {
14398 if (tls_type & GOT_TLS_IE)
14399 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14400
14401 if (tls_type & GOT_TLS_GD)
14402 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14403
14404 if (tls_type & GOT_TLS_GDESC)
14405 {
14406 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14407 /* GDESC needs a trampoline to jump to. */
14408 htab->tls_trampoline = -1;
14409 }
14410
14411 /* Only GD needs it. GDESC just emits one relocation per
14412 2 entries. */
14413 if ((tls_type & GOT_TLS_GD) && indx != 0)
14414 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14415 }
14416 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14417 {
14418 if (htab->root.dynamic_sections_created)
14419 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14420 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14421 }
14422 else if (h->type == STT_GNU_IFUNC
14423 && eh->plt.noncall_refcount == 0)
14424 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14425 they all resolve dynamically instead. Reserve room for the
14426 GOT entry's R_ARM_IRELATIVE relocation. */
14427 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14428 else if (bfd_link_pic (info)
14429 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14430 || h->root.type != bfd_link_hash_undefweak))
14431 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14432 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14433 }
14434 }
14435 else
14436 h->got.offset = (bfd_vma) -1;
14437
14438 /* Allocate stubs for exported Thumb functions on v4t. */
14439 if (!htab->use_blx && h->dynindx != -1
14440 && h->def_regular
14441 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
14442 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14443 {
14444 struct elf_link_hash_entry * th;
14445 struct bfd_link_hash_entry * bh;
14446 struct elf_link_hash_entry * myh;
14447 char name[1024];
14448 asection *s;
14449 bh = NULL;
14450 /* Create a new symbol to regist the real location of the function. */
14451 s = h->root.u.def.section;
14452 sprintf (name, "__real_%s", h->root.root.string);
14453 _bfd_generic_link_add_one_symbol (info, s->owner,
14454 name, BSF_GLOBAL, s,
14455 h->root.u.def.value,
14456 NULL, TRUE, FALSE, &bh);
14457
14458 myh = (struct elf_link_hash_entry *) bh;
14459 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14460 myh->forced_local = 1;
14461 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
14462 eh->export_glue = myh;
14463 th = record_arm_to_thumb_glue (info, h);
14464 /* Point the symbol at the stub. */
14465 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14466 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14467 h->root.u.def.section = th->root.u.def.section;
14468 h->root.u.def.value = th->root.u.def.value & ~1;
14469 }
14470
14471 if (eh->dyn_relocs == NULL)
14472 return TRUE;
14473
14474 /* In the shared -Bsymbolic case, discard space allocated for
14475 dynamic pc-relative relocs against symbols which turn out to be
14476 defined in regular objects. For the normal shared case, discard
14477 space for pc-relative relocs that have become local due to symbol
14478 visibility changes. */
14479
14480 if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14481 {
14482 /* Relocs that use pc_count are PC-relative forms, which will appear
14483 on something like ".long foo - ." or "movw REG, foo - .". We want
14484 calls to protected symbols to resolve directly to the function
14485 rather than going via the plt. If people want function pointer
14486 comparisons to work as expected then they should avoid writing
14487 assembly like ".long foo - .". */
14488 if (SYMBOL_CALLS_LOCAL (info, h))
14489 {
14490 struct elf_dyn_relocs **pp;
14491
14492 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14493 {
14494 p->count -= p->pc_count;
14495 p->pc_count = 0;
14496 if (p->count == 0)
14497 *pp = p->next;
14498 else
14499 pp = &p->next;
14500 }
14501 }
14502
14503 if (htab->vxworks_p)
14504 {
14505 struct elf_dyn_relocs **pp;
14506
14507 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14508 {
14509 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14510 *pp = p->next;
14511 else
14512 pp = &p->next;
14513 }
14514 }
14515
14516 /* Also discard relocs on undefined weak syms with non-default
14517 visibility. */
14518 if (eh->dyn_relocs != NULL
14519 && h->root.type == bfd_link_hash_undefweak)
14520 {
14521 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14522 eh->dyn_relocs = NULL;
14523
14524 /* Make sure undefined weak symbols are output as a dynamic
14525 symbol in PIEs. */
14526 else if (h->dynindx == -1
14527 && !h->forced_local)
14528 {
14529 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14530 return FALSE;
14531 }
14532 }
14533
14534 else if (htab->root.is_relocatable_executable && h->dynindx == -1
14535 && h->root.type == bfd_link_hash_new)
14536 {
14537 /* Output absolute symbols so that we can create relocations
14538 against them. For normal symbols we output a relocation
14539 against the section that contains them. */
14540 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14541 return FALSE;
14542 }
14543
14544 }
14545 else
14546 {
14547 /* For the non-shared case, discard space for relocs against
14548 symbols which turn out to need copy relocs or are not
14549 dynamic. */
14550
14551 if (!h->non_got_ref
14552 && ((h->def_dynamic
14553 && !h->def_regular)
14554 || (htab->root.dynamic_sections_created
14555 && (h->root.type == bfd_link_hash_undefweak
14556 || h->root.type == bfd_link_hash_undefined))))
14557 {
14558 /* Make sure this symbol is output as a dynamic symbol.
14559 Undefined weak syms won't yet be marked as dynamic. */
14560 if (h->dynindx == -1
14561 && !h->forced_local)
14562 {
14563 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14564 return FALSE;
14565 }
14566
14567 /* If that succeeded, we know we'll be keeping all the
14568 relocs. */
14569 if (h->dynindx != -1)
14570 goto keep;
14571 }
14572
14573 eh->dyn_relocs = NULL;
14574
14575 keep: ;
14576 }
14577
14578 /* Finally, allocate space. */
14579 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14580 {
14581 asection *sreloc = elf_section_data (p->sec)->sreloc;
14582 if (h->type == STT_GNU_IFUNC
14583 && eh->plt.noncall_refcount == 0
14584 && SYMBOL_REFERENCES_LOCAL (info, h))
14585 elf32_arm_allocate_irelocs (info, sreloc, p->count);
14586 else
14587 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14588 }
14589
14590 return TRUE;
14591 }
14592
14593 /* Find any dynamic relocs that apply to read-only sections. */
14594
14595 static bfd_boolean
14596 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14597 {
14598 struct elf32_arm_link_hash_entry * eh;
14599 struct elf_dyn_relocs * p;
14600
14601 eh = (struct elf32_arm_link_hash_entry *) h;
14602 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14603 {
14604 asection *s = p->sec;
14605
14606 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14607 {
14608 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14609
14610 info->flags |= DF_TEXTREL;
14611
14612 /* Not an error, just cut short the traversal. */
14613 return FALSE;
14614 }
14615 }
14616 return TRUE;
14617 }
14618
14619 void
14620 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14621 int byteswap_code)
14622 {
14623 struct elf32_arm_link_hash_table *globals;
14624
14625 globals = elf32_arm_hash_table (info);
14626 if (globals == NULL)
14627 return;
14628
14629 globals->byteswap_code = byteswap_code;
14630 }
14631
/* Set the sizes of the dynamic sections.  This is the backend's
   size_dynamic_sections hook: it lays out .got/.got.plt entries and
   dynamic relocations for local symbols, traverses the hash table to
   do the same for global symbols, sizes the interworking glue
   sections, strips empty linker-created sections, and adds the
   DT_* entries that .dynamic will need.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;

      if (! is_arm_elf (ibfd))
	continue;

      /* First reserve space for the dynamic relocations recorded
	 against each input section of this bfd.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* local_got, local_iplt_ptr, local_tls_type and
	 local_tlsdesc_gotent are parallel per-local-symbol arrays;
	 symndx tracks the symbol index alongside them.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;
	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  /* TLS descriptors live in .got.plt, after the jump
		     table; record the offset relative to its end.  */
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
		      || *local_tls_type & GOT_TLS_GD)
		    elf32_arm_allocate_dynrelocs (info, srel, 1);

		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* -1 flags that a TLS trampoline is needed; its
			 real offset is assigned below.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  /* tls_trampoline was set to -1 above (and by the global-symbol
     traversal) whenever a GDESC reloc requires one; here it receives
     its actual PLT offset.  */
  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->sdynbss)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt &&
	      (!add_dynamic_entry (DT_TLSDESC_PLT,0)
	       || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
15028
15029 /* Size sections even though they're not dynamic. We use it to setup
15030 _TLS_MODULE_BASE_, if needed. */
15031
15032 static bfd_boolean
15033 elf32_arm_always_size_sections (bfd *output_bfd,
15034 struct bfd_link_info *info)
15035 {
15036 asection *tls_sec;
15037
15038 if (bfd_link_relocatable (info))
15039 return TRUE;
15040
15041 tls_sec = elf_hash_table (info)->tls_sec;
15042
15043 if (tls_sec)
15044 {
15045 struct elf_link_hash_entry *tlsbase;
15046
15047 tlsbase = elf_link_hash_lookup
15048 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
15049
15050 if (tlsbase)
15051 {
15052 struct bfd_link_hash_entry *bh = NULL;
15053 const struct elf_backend_data *bed
15054 = get_elf_backend_data (output_bfd);
15055
15056 if (!(_bfd_generic_link_add_one_symbol
15057 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
15058 tls_sec, 0, NULL, FALSE,
15059 bed->collect, &bh)))
15060 return FALSE;
15061
15062 tlsbase->type = STT_TLS;
15063 tlsbase = (struct elf_link_hash_entry *)bh;
15064 tlsbase->def_regular = 1;
15065 tlsbase->other = STV_HIDDEN;
15066 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
15067 }
15068 }
15069 return TRUE;
15070 }
15071
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  For a symbol with a PLT entry this fills in
   the entry and fixes up the output symbol; it also emits the
   R_ARM_COPY reloc for copy-relocated symbols, and forces _DYNAMIC
   and _GLOBAL_OFFSET_TABLE_ to be absolute.  H is the symbol, SYM the
   output ELF symbol being written.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      /* .iplt entries for ifuncs are filled in elsewhere; only
	 ordinary PLT entries are populated here, and those require a
	 dynamic symbol index.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.
	     Point the output symbol at it (as an ARM-mode STT_FUNC).  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = htab->srelbss;
      BFD_ASSERT (s != NULL);

      /* The R_ARM_COPY reloc points at the symbol's run-time copy.  */
      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
15160
15161 static void
15162 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15163 void *contents,
15164 const unsigned long *template, unsigned count)
15165 {
15166 unsigned ix;
15167
15168 for (ix = 0; ix != count; ix++)
15169 {
15170 unsigned long insn = template[ix];
15171
15172 /* Emit mov pc,rx if bx is not permitted. */
15173 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15174 insn = (insn & 0xf000000f) | 0x01a0f000;
15175 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15176 }
15177 }
15178
15179 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15180 other variants, NaCl needs this entry in a static executable's
15181 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15182 zero. For .iplt really only the last bundle is useful, and .iplt
15183 could have a shorter first entry, with each individual PLT entry's
15184 relative branch calculated differently so it targets the last
15185 bundle instead of the instruction before it (labelled .Lplt_tail
15186 above). But it's simpler to keep the size and layout of PLT0
15187 consistent with the dynamic case, at the cost of some dead code at
15188 the start of .iplt and the one dead store to the stack at the start
15189 of .Lplt_tail. */
15190 static void
15191 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15192 asection *plt, bfd_vma got_displacement)
15193 {
15194 unsigned int i;
15195
15196 put_arm_insn (htab, output_bfd,
15197 elf32_arm_nacl_plt0_entry[0]
15198 | arm_movw_immediate (got_displacement),
15199 plt->contents + 0);
15200 put_arm_insn (htab, output_bfd,
15201 elf32_arm_nacl_plt0_entry[1]
15202 | arm_movt_immediate (got_displacement),
15203 plt->contents + 4);
15204
15205 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15206 put_arm_insn (htab, output_bfd,
15207 elf32_arm_nacl_plt0_entry[i],
15208 plt->contents + (i * 4));
15209 }
15210
15211 /* Finish up the dynamic sections. */
15212
15213 static bfd_boolean
15214 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
15215 {
15216 bfd * dynobj;
15217 asection * sgot;
15218 asection * sdyn;
15219 struct elf32_arm_link_hash_table *htab;
15220
15221 htab = elf32_arm_hash_table (info);
15222 if (htab == NULL)
15223 return FALSE;
15224
15225 dynobj = elf_hash_table (info)->dynobj;
15226
15227 sgot = htab->root.sgotplt;
15228 /* A broken linker script might have discarded the dynamic sections.
15229 Catch this here so that we do not seg-fault later on. */
15230 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
15231 return FALSE;
15232 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
15233
15234 if (elf_hash_table (info)->dynamic_sections_created)
15235 {
15236 asection *splt;
15237 Elf32_External_Dyn *dyncon, *dynconend;
15238
15239 splt = htab->root.splt;
15240 BFD_ASSERT (splt != NULL && sdyn != NULL);
15241 BFD_ASSERT (htab->symbian_p || sgot != NULL);
15242
15243 dyncon = (Elf32_External_Dyn *) sdyn->contents;
15244 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
15245
15246 for (; dyncon < dynconend; dyncon++)
15247 {
15248 Elf_Internal_Dyn dyn;
15249 const char * name;
15250 asection * s;
15251
15252 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
15253
15254 switch (dyn.d_tag)
15255 {
15256 unsigned int type;
15257
15258 default:
15259 if (htab->vxworks_p
15260 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
15261 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15262 break;
15263
15264 case DT_HASH:
15265 name = ".hash";
15266 goto get_vma_if_bpabi;
15267 case DT_STRTAB:
15268 name = ".dynstr";
15269 goto get_vma_if_bpabi;
15270 case DT_SYMTAB:
15271 name = ".dynsym";
15272 goto get_vma_if_bpabi;
15273 case DT_VERSYM:
15274 name = ".gnu.version";
15275 goto get_vma_if_bpabi;
15276 case DT_VERDEF:
15277 name = ".gnu.version_d";
15278 goto get_vma_if_bpabi;
15279 case DT_VERNEED:
15280 name = ".gnu.version_r";
15281 goto get_vma_if_bpabi;
15282
15283 case DT_PLTGOT:
15284 name = htab->symbian_p ? ".got" : ".got.plt";
15285 goto get_vma;
15286 case DT_JMPREL:
15287 name = RELOC_SECTION (htab, ".plt");
15288 get_vma:
15289 s = bfd_get_linker_section (dynobj, name);
15290 if (s == NULL)
15291 {
15292 (*_bfd_error_handler)
15293 (_("could not find section %s"), name);
15294 bfd_set_error (bfd_error_invalid_operation);
15295 return FALSE;
15296 }
15297 if (!htab->symbian_p)
15298 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
15299 else
15300 /* In the BPABI, tags in the PT_DYNAMIC section point
15301 at the file offset, not the memory address, for the
15302 convenience of the post linker. */
15303 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
15304 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15305 break;
15306
15307 get_vma_if_bpabi:
15308 if (htab->symbian_p)
15309 goto get_vma;
15310 break;
15311
15312 case DT_PLTRELSZ:
15313 s = htab->root.srelplt;
15314 BFD_ASSERT (s != NULL);
15315 dyn.d_un.d_val = s->size;
15316 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15317 break;
15318
15319 case DT_RELSZ:
15320 case DT_RELASZ:
15321 if (!htab->symbian_p)
15322 {
15323 /* My reading of the SVR4 ABI indicates that the
15324 procedure linkage table relocs (DT_JMPREL) should be
15325 included in the overall relocs (DT_REL). This is
15326 what Solaris does. However, UnixWare can not handle
15327 that case. Therefore, we override the DT_RELSZ entry
15328 here to make it not include the JMPREL relocs. Since
15329 the linker script arranges for .rel(a).plt to follow all
15330 other relocation sections, we don't have to worry
15331 about changing the DT_REL entry. */
15332 s = htab->root.srelplt;
15333 if (s != NULL)
15334 dyn.d_un.d_val -= s->size;
15335 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15336 break;
15337 }
15338 /* Fall through. */
15339
15340 case DT_REL:
15341 case DT_RELA:
	  /* In the BPABI, the DT_REL tag must point at the file
	     offset, not the VMA, of the first relocation
	     section.  So, we use code similar to that in
	     elflink.c, but do not check for SHF_ALLOC on the
	     relocation section, since relocation sections are
	     never allocated under the BPABI.  The comments above
	     about UnixWare notwithstanding, we include all of the
	     relocations here.  */
15350 if (htab->symbian_p)
15351 {
15352 unsigned int i;
15353 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
15354 ? SHT_REL : SHT_RELA);
15355 dyn.d_un.d_val = 0;
15356 for (i = 1; i < elf_numsections (output_bfd); i++)
15357 {
15358 Elf_Internal_Shdr *hdr
15359 = elf_elfsections (output_bfd)[i];
15360 if (hdr->sh_type == type)
15361 {
15362 if (dyn.d_tag == DT_RELSZ
15363 || dyn.d_tag == DT_RELASZ)
15364 dyn.d_un.d_val += hdr->sh_size;
15365 else if ((ufile_ptr) hdr->sh_offset
15366 <= dyn.d_un.d_val - 1)
15367 dyn.d_un.d_val = hdr->sh_offset;
15368 }
15369 }
15370 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15371 }
15372 break;
15373
15374 case DT_TLSDESC_PLT:
15375 s = htab->root.splt;
15376 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15377 + htab->dt_tlsdesc_plt);
15378 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15379 break;
15380
15381 case DT_TLSDESC_GOT:
15382 s = htab->root.sgot;
15383 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15384 + htab->dt_tlsdesc_got);
15385 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15386 break;
15387
15388 /* Set the bottom bit of DT_INIT/FINI if the
15389 corresponding function is Thumb. */
15390 case DT_INIT:
15391 name = info->init_function;
15392 goto get_sym;
15393 case DT_FINI:
15394 name = info->fini_function;
15395 get_sym:
15396 /* If it wasn't set by elf_bfd_final_link
15397 then there is nothing to adjust. */
15398 if (dyn.d_un.d_val != 0)
15399 {
15400 struct elf_link_hash_entry * eh;
15401
15402 eh = elf_link_hash_lookup (elf_hash_table (info), name,
15403 FALSE, FALSE, TRUE);
15404 if (eh != NULL
15405 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
15406 == ST_BRANCH_TO_THUMB)
15407 {
15408 dyn.d_un.d_val |= 1;
15409 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15410 }
15411 }
15412 break;
15413 }
15414 }
15415
15416 /* Fill in the first entry in the procedure linkage table. */
15417 if (splt->size > 0 && htab->plt_header_size)
15418 {
15419 const bfd_vma *plt0_entry;
15420 bfd_vma got_address, plt_address, got_displacement;
15421
15422 /* Calculate the addresses of the GOT and PLT. */
15423 got_address = sgot->output_section->vma + sgot->output_offset;
15424 plt_address = splt->output_section->vma + splt->output_offset;
15425
15426 if (htab->vxworks_p)
15427 {
15428 /* The VxWorks GOT is relocated by the dynamic linker.
15429 Therefore, we must emit relocations rather than simply
15430 computing the values now. */
15431 Elf_Internal_Rela rel;
15432
15433 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
15434 put_arm_insn (htab, output_bfd, plt0_entry[0],
15435 splt->contents + 0);
15436 put_arm_insn (htab, output_bfd, plt0_entry[1],
15437 splt->contents + 4);
15438 put_arm_insn (htab, output_bfd, plt0_entry[2],
15439 splt->contents + 8);
15440 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
15441
15442 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15443 rel.r_offset = plt_address + 12;
15444 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15445 rel.r_addend = 0;
15446 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
15447 htab->srelplt2->contents);
15448 }
15449 else if (htab->nacl_p)
15450 arm_nacl_put_plt0 (htab, output_bfd, splt,
15451 got_address + 8 - (plt_address + 16));
15452 else if (using_thumb_only (htab))
15453 {
15454 got_displacement = got_address - (plt_address + 12);
15455
15456 plt0_entry = elf32_thumb2_plt0_entry;
15457 put_arm_insn (htab, output_bfd, plt0_entry[0],
15458 splt->contents + 0);
15459 put_arm_insn (htab, output_bfd, plt0_entry[1],
15460 splt->contents + 4);
15461 put_arm_insn (htab, output_bfd, plt0_entry[2],
15462 splt->contents + 8);
15463
15464 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
15465 }
15466 else
15467 {
15468 got_displacement = got_address - (plt_address + 16);
15469
15470 plt0_entry = elf32_arm_plt0_entry;
15471 put_arm_insn (htab, output_bfd, plt0_entry[0],
15472 splt->contents + 0);
15473 put_arm_insn (htab, output_bfd, plt0_entry[1],
15474 splt->contents + 4);
15475 put_arm_insn (htab, output_bfd, plt0_entry[2],
15476 splt->contents + 8);
15477 put_arm_insn (htab, output_bfd, plt0_entry[3],
15478 splt->contents + 12);
15479
15480 #ifdef FOUR_WORD_PLT
15481 /* The displacement value goes in the otherwise-unused
15482 last word of the second entry. */
15483 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
15484 #else
15485 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
15486 #endif
15487 }
15488 }
15489
15490 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15491 really seem like the right value. */
15492 if (splt->output_section->owner == output_bfd)
15493 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
15494
15495 if (htab->dt_tlsdesc_plt)
15496 {
15497 bfd_vma got_address
15498 = sgot->output_section->vma + sgot->output_offset;
15499 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
15500 + htab->root.sgot->output_offset);
15501 bfd_vma plt_address
15502 = splt->output_section->vma + splt->output_offset;
15503
15504 arm_put_trampoline (htab, output_bfd,
15505 splt->contents + htab->dt_tlsdesc_plt,
15506 dl_tlsdesc_lazy_trampoline, 6);
15507
15508 bfd_put_32 (output_bfd,
15509 gotplt_address + htab->dt_tlsdesc_got
15510 - (plt_address + htab->dt_tlsdesc_plt)
15511 - dl_tlsdesc_lazy_trampoline[6],
15512 splt->contents + htab->dt_tlsdesc_plt + 24);
15513 bfd_put_32 (output_bfd,
15514 got_address - (plt_address + htab->dt_tlsdesc_plt)
15515 - dl_tlsdesc_lazy_trampoline[7],
15516 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
15517 }
15518
15519 if (htab->tls_trampoline)
15520 {
15521 arm_put_trampoline (htab, output_bfd,
15522 splt->contents + htab->tls_trampoline,
15523 tls_trampoline, 3);
15524 #ifdef FOUR_WORD_PLT
15525 bfd_put_32 (output_bfd, 0x00000000,
15526 splt->contents + htab->tls_trampoline + 12);
15527 #endif
15528 }
15529
15530 if (htab->vxworks_p
15531 && !bfd_link_pic (info)
15532 && htab->root.splt->size > 0)
15533 {
15534 /* Correct the .rel(a).plt.unloaded relocations. They will have
15535 incorrect symbol indexes. */
15536 int num_plts;
15537 unsigned char *p;
15538
15539 num_plts = ((htab->root.splt->size - htab->plt_header_size)
15540 / htab->plt_entry_size);
15541 p = htab->srelplt2->contents + RELOC_SIZE (htab);
15542
15543 for (; num_plts; num_plts--)
15544 {
15545 Elf_Internal_Rela rel;
15546
15547 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15548 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15549 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15550 p += RELOC_SIZE (htab);
15551
15552 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15553 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
15554 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15555 p += RELOC_SIZE (htab);
15556 }
15557 }
15558 }
15559
15560 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
15561 /* NaCl uses a special first entry in .iplt too. */
15562 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
15563
15564 /* Fill in the first three entries in the global offset table. */
15565 if (sgot)
15566 {
15567 if (sgot->size > 0)
15568 {
15569 if (sdyn == NULL)
15570 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
15571 else
15572 bfd_put_32 (output_bfd,
15573 sdyn->output_section->vma + sdyn->output_offset,
15574 sgot->contents);
15575 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
15576 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
15577 }
15578
15579 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
15580 }
15581
15582 return TRUE;
15583 }
15584
15585 static void
15586 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15587 {
15588 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15589 struct elf32_arm_link_hash_table *globals;
15590 struct elf_segment_map *m;
15591
15592 i_ehdrp = elf_elfheader (abfd);
15593
15594 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15595 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15596 else
15597 _bfd_elf_post_process_headers (abfd, link_info);
15598 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15599
15600 if (link_info)
15601 {
15602 globals = elf32_arm_hash_table (link_info);
15603 if (globals != NULL && globals->byteswap_code)
15604 i_ehdrp->e_flags |= EF_ARM_BE8;
15605 }
15606
15607 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15608 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15609 {
15610 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15611 if (abi == AEABI_VFP_args_vfp)
15612 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15613 else
15614 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15615 }
15616
15617 /* Scan segment to set p_flags attribute if it contains only sections with
15618 SHF_ARM_NOREAD flag. */
15619 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15620 {
15621 unsigned int j;
15622
15623 if (m->count == 0)
15624 continue;
15625 for (j = 0; j < m->count; j++)
15626 {
15627 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15628 break;
15629 }
15630 if (j == m->count)
15631 {
15632 m->p_flags = PF_X;
15633 m->p_flags_valid = 1;
15634 }
15635 }
15636 }
15637
15638 static enum elf_reloc_type_class
15639 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15640 const asection *rel_sec ATTRIBUTE_UNUSED,
15641 const Elf_Internal_Rela *rela)
15642 {
15643 switch ((int) ELF32_R_TYPE (rela->r_info))
15644 {
15645 case R_ARM_RELATIVE:
15646 return reloc_class_relative;
15647 case R_ARM_JUMP_SLOT:
15648 return reloc_class_plt;
15649 case R_ARM_COPY:
15650 return reloc_class_copy;
15651 case R_ARM_IRELATIVE:
15652 return reloc_class_ifunc;
15653 default:
15654 return reloc_class_normal;
15655 }
15656 }
15657
/* Backend final-write hook: bring any ARM note section in ABFD up to
   date before the file is written out.  */
static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
15663
15664 /* Return TRUE if this is an unwinding table entry. */
15665
15666 static bfd_boolean
15667 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15668 {
15669 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15670 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15671 }
15672
15673
15674 /* Set the type and flags for an ARM section. We do this by
15675 the section name, which is a hack, but ought to work. */
15676
15677 static bfd_boolean
15678 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15679 {
15680 const char * name;
15681
15682 name = bfd_get_section_name (abfd, sec);
15683
15684 if (is_arm_elf_unwind_section_name (abfd, name))
15685 {
15686 hdr->sh_type = SHT_ARM_EXIDX;
15687 hdr->sh_flags |= SHF_LINK_ORDER;
15688 }
15689
15690 if (sec->flags & SEC_ELF_NOREAD)
15691 hdr->sh_flags |= SHF_ARM_NOREAD;
15692
15693 return TRUE;
15694 }
15695
15696 /* Handle an ARM specific section when reading an object file. This is
15697 called when bfd_section_from_shdr finds a section with an unknown
15698 type. */
15699
15700 static bfd_boolean
15701 elf32_arm_section_from_shdr (bfd *abfd,
15702 Elf_Internal_Shdr * hdr,
15703 const char *name,
15704 int shindex)
15705 {
15706 /* There ought to be a place to keep ELF backend specific flags, but
15707 at the moment there isn't one. We just keep track of the
15708 sections by their name, instead. Fortunately, the ABI gives
15709 names for all the ARM specific sections, so we will probably get
15710 away with this. */
15711 switch (hdr->sh_type)
15712 {
15713 case SHT_ARM_EXIDX:
15714 case SHT_ARM_PREEMPTMAP:
15715 case SHT_ARM_ATTRIBUTES:
15716 break;
15717
15718 default:
15719 return FALSE;
15720 }
15721
15722 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15723 return FALSE;
15724
15725 return TRUE;
15726 }
15727
15728 static _arm_elf_section_data *
15729 get_arm_elf_section_data (asection * sec)
15730 {
15731 if (sec && sec->owner && is_arm_elf (sec->owner))
15732 return elf32_arm_section_data (sec);
15733 else
15734 return NULL;
15735 }
15736
/* State threaded through the routines that emit linker-generated
   (mapping and stub) symbols via the backend symbol-output hook.  */
typedef struct
{
  void *flaginfo;		/* Opaque first argument passed to FUNC.  */
  struct bfd_link_info *info;	/* The link in progress.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback used to emit each symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
15746
/* The three classes of mapping symbol: $a (ARM code), $t (Thumb code)
   and $d (data).  Order matches the names[] table in
   elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
15753
15754
15755 /* Output a single mapping symbol. */
15756
15757 static bfd_boolean
15758 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15759 enum map_symbol_type type,
15760 bfd_vma offset)
15761 {
15762 static const char *names[3] = {"$a", "$t", "$d"};
15763 Elf_Internal_Sym sym;
15764
15765 sym.st_value = osi->sec->output_section->vma
15766 + osi->sec->output_offset
15767 + offset;
15768 sym.st_size = 0;
15769 sym.st_other = 0;
15770 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15771 sym.st_shndx = osi->sec_shndx;
15772 sym.st_target_internal = 0;
15773 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15774 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15775 }
15776
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The symbol offsets emitted depend on the target PLT layout (Symbian,
   VxWorks, NaCl, Thumb-only, or the default ARM layout).  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* Nothing to do if no PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Point OSI at the section the entry lives in; .iplt has no
     header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit, which flags a Thumb entry point, to get the
     real PLT offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks entries interleave code and data words.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  /* The Thumb-mode stub precedes the ARM entry by 4 bytes.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
15867
/* Output mapping symbols for PLT entries associated with H.  Called
   via elf_link_hash_traverse; INF is the output_arch_syminfo.  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_entry *eh;

  /* Indirect symbols are handled via the symbol they point at.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  eh = (struct elf32_arm_link_hash_entry *) h;
  /* The second argument tells _1 whether the entry lives in .iplt
     (symbols that always resolve locally) rather than .plt.  */
  return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
				     &h->plt, &eh->plt);
}
15889
15890 /* Bind a veneered symbol to its veneer identified by its hash entry
15891 STUB_ENTRY. The veneered location thus loose its symbol. */
15892
15893 static void
15894 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
15895 {
15896 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
15897
15898 BFD_ASSERT (hash);
15899 hash->root.root.u.def.section = stub_entry->stub_sec;
15900 hash->root.root.u.def.value = stub_entry->stub_offset;
15901 hash->root.size = stub_entry->stub_size;
15902 }
15903
15904 /* Output a single local symbol for a generated stub. */
15905
15906 static bfd_boolean
15907 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
15908 bfd_vma offset, bfd_vma size)
15909 {
15910 Elf_Internal_Sym sym;
15911
15912 sym.st_value = osi->sec->output_section->vma
15913 + osi->sec->output_offset
15914 + offset;
15915 sym.st_size = size;
15916 sym.st_other = 0;
15917 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
15918 sym.st_shndx = osi->sec_shndx;
15919 sym.st_target_internal = 0;
15920 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
15921 }
15922
/* Emit the naming symbol and mapping symbols for one linker stub.
   Called via bfd_hash_traverse over the stub hash table; GEN_ENTRY is
   the stub hash entry and IN_ARG the output_arch_syminfo describing
   the section currently being written.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub.  Thumb entry points get
	 the low bit set.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol whenever the
     instruction class changes and accumulating the byte size of each
     template element.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
16030
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.  Emits $d/$a/$t symbols for glue sections, long-branch stubs,
   the PLT/iPLT and the TLS trampolines.  FUNC/FLAGINFO are the
   symbol-output callback and its opaque argument.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Only allocated code-carrying output sections with real
	       contents, no existing mapping symbols and a nonzero
	       size need the extra $d.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Veneer size depends on which glue flavour was emitted: PIC,
	 BLX-capable static, or plain static.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer is ARM code followed by a 4-byte data word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* 4 bytes of Thumb code followed by ARM code.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab))
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Per-entry mapping symbols: global entries via the link hash
	 table, local ifunc entries via each input bfd's iplt array.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
16265
16266 /* Allocate target specific section data. */
16267
16268 static bfd_boolean
16269 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16270 {
16271 if (!sec->used_by_bfd)
16272 {
16273 _arm_elf_section_data *sdata;
16274 bfd_size_type amt = sizeof (*sdata);
16275
16276 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16277 if (sdata == NULL)
16278 return FALSE;
16279 sec->used_by_bfd = sdata;
16280 }
16281
16282 return _bfd_elf_new_section_hook (abfd, sec);
16283 }
16284
16285
16286 /* Used to order a list of mapping symbols by address. */
16287
16288 static int
16289 elf32_arm_compare_mapping (const void * a, const void * b)
16290 {
16291 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16292 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16293
16294 if (amap->vma > bmap->vma)
16295 return 1;
16296 else if (amap->vma < bmap->vma)
16297 return -1;
16298 else if (amap->type > bmap->type)
16299 /* Ensure results do not depend on the host qsort for objects with
16300 multiple mapping symbols at the same address by sorting on type
16301 after vma. */
16302 return 1;
16303 else if (amap->type < bmap->type)
16304 return -1;
16305 else
16306 return 0;
16307 }
16308
16309 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16310
16311 static unsigned long
16312 offset_prel31 (unsigned long addr, bfd_vma offset)
16313 {
16314 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16315 }
16316
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern
     is not 0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab
     entry.  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
16338
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose contents are being patched.  */
  bfd_byte *contents;		/* That section's contents buffer.  */
};
16346
16347
16348 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16349 places for a particular section. */
16350
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections and stub types below the first
     Cortex-A8 veneer kind.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX target, align the source address down to a word
     boundary, matching the BLX PC-relative addressing rule.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the Thumb-2 branch opcode template for the stub type, then
     scatter the 25-bit offset into the S/imm10/J1/J2/imm11 fields.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.W (encoding T4).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX (encoding T2).  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL (encoding T1).  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two Thumb halfwords, high halfword first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
16455
16456 /* Beginning of stm32l4xx work-around. */
16457
16458 /* Functions encoding instructions necessary for the emission of the
16459 fix-stm32l4xx-629360.
16460 Encoding is extracted from the
16461 ARM (C) Architecture Reference Manual
16462 ARMv7-A and ARMv7-R edition
16463 ARM DDI 0406C.b (ID072512). */
16464
16465 static inline bfd_vma
16466 create_instruction_branch_absolute (int branch_offset)
16467 {
16468 /* A8.8.18 B (A8-334)
16469 B target_address (Encoding T4). */
16470 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16471 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16472 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16473
16474 int s = ((branch_offset & 0x1000000) >> 24);
16475 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16476 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16477
16478 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16479 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16480
16481 bfd_vma patched_inst = 0xf0009000
16482 | s << 26 /* S. */
16483 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16484 | j1 << 13 /* J1. */
16485 | j2 << 11 /* J2. */
16486 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16487
16488 return patched_inst;
16489 }
16490
16491 static inline bfd_vma
16492 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16493 {
16494 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16495 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16496 bfd_vma patched_inst = 0xe8900000
16497 | (/*W=*/wback << 21)
16498 | (base_reg << 16)
16499 | (reg_mask & 0x0000ffff);
16500
16501 return patched_inst;
16502 }
16503
16504 static inline bfd_vma
16505 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16506 {
16507 /* A8.8.60 LDMDB/LDMEA (A8-402)
16508 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16509 bfd_vma patched_inst = 0xe9100000
16510 | (/*W=*/wback << 21)
16511 | (base_reg << 16)
16512 | (reg_mask & 0x0000ffff);
16513
16514 return patched_inst;
16515 }
16516
16517 static inline bfd_vma
16518 create_instruction_mov (int target_reg, int source_reg)
16519 {
16520 /* A8.8.103 MOV (register) (A8-486)
16521 MOV Rd, Rm (Encoding T1). */
16522 bfd_vma patched_inst = 0x4600
16523 | (target_reg & 0x7)
16524 | ((target_reg & 0x8) >> 3) << 7
16525 | (source_reg << 3);
16526
16527 return patched_inst;
16528 }
16529
16530 static inline bfd_vma
16531 create_instruction_sub (int target_reg, int source_reg, int value)
16532 {
16533 /* A8.8.221 SUB (immediate) (A8-708)
16534 SUB Rd, Rn, #value (Encoding T3). */
16535 bfd_vma patched_inst = 0xf1a00000
16536 | (target_reg << 8)
16537 | (source_reg << 16)
16538 | (/*S=*/0 << 20)
16539 | ((value & 0x800) >> 11) << 26
16540 | ((value & 0x700) >> 8) << 12
16541 | (value & 0x0ff);
16542
16543 return patched_inst;
16544 }
16545
16546 static inline bfd_vma
16547 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16548 int first_reg)
16549 {
16550 /* A8.8.332 VLDM (A8-922)
16551 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16552 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16553 | (/*W=*/wback << 21)
16554 | (base_reg << 16)
16555 | (num_words & 0x000000ff)
16556 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16557 | (first_reg & 0x00000001) << 22;
16558
16559 return patched_inst;
16560 }
16561
16562 static inline bfd_vma
16563 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16564 int first_reg)
16565 {
16566 /* A8.8.332 VLDM (A8-922)
16567 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16568 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16569 | (base_reg << 16)
16570 | (num_words & 0x000000ff)
16571 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16572 | (first_reg & 0x00000001) << 22;
16573
16574 return patched_inst;
16575 }
16576
16577 static inline bfd_vma
16578 create_instruction_udf_w (int value)
16579 {
16580 /* A8.8.247 UDF (A8-758)
16581 Undefined (Encoding T2). */
16582 bfd_vma patched_inst = 0xf7f0a000
16583 | (value & 0x00000fff)
16584 | (value & 0x000f0000) << 16;
16585
16586 return patched_inst;
16587 }
16588
16589 static inline bfd_vma
16590 create_instruction_udf (int value)
16591 {
16592 /* A8.8.247 UDF (A8-758)
16593 Undefined (Encoding T1). */
16594 bfd_vma patched_inst = 0xde00
16595 | (value & 0xff);
16596
16597 return patched_inst;
16598 }
16599
16600 /* Functions writing an instruction in memory, returning the next
16601 memory position to write to. */
16602
static inline bfd_byte *
push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  /* Write the 32-bit Thumb-2 instruction INSN at PT and return the
     position just past it.  */
  put_thumb2_insn (htab, output_bfd, insn, pt);
  return pt + 4;
}
16610
static inline bfd_byte *
push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
{
  /* Write the 16-bit Thumb instruction INSN at PT and return the
     position just past it.  */
  put_thumb_insn (htab, output_bfd, insn, pt);
  return pt + 2;
}
16618
16619 /* Function filling up a region in memory with T1 and T2 UDFs taking
16620 care of alignment. */
16621
16622 static bfd_byte *
16623 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16624 bfd * output_bfd,
16625 const bfd_byte * const base_stub_contents,
16626 bfd_byte * const from_stub_contents,
16627 const bfd_byte * const end_stub_contents)
16628 {
16629 bfd_byte *current_stub_contents = from_stub_contents;
16630
16631 /* Fill the remaining of the stub with deterministic contents : UDF
16632 instructions.
16633 Check if realignment is needed on modulo 4 frontier using T1, to
16634 further use T2. */
16635 if ((current_stub_contents < end_stub_contents)
16636 && !((current_stub_contents - base_stub_contents) % 2)
16637 && ((current_stub_contents - base_stub_contents) % 4))
16638 current_stub_contents =
16639 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16640 create_instruction_udf (0));
16641
16642 for (; current_stub_contents < end_stub_contents;)
16643 current_stub_contents =
16644 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16645 create_instruction_udf_w (0));
16646
16647 return current_stub_contents;
16648 }
16649
16650 /* Functions writing the stream of instructions equivalent to the
16651 derived sequence for ldmia, ldmdb, vldm respectively. */
16652
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide LDMIA
   INITIAL_INSN found at INITIAL_INSN_ADDR.  The load-multiple is
   re-expressed as two LDMs of at most 7 registers each, followed by a
   branch back to the instruction after the original one (omitted when
   PC is in the register list, since the load itself transfers
   control).  */
static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Fields decoded from the original T2 LDMIA encoding.  */
  int wback = (initial_insn & 0x00200000) >> 21;	/* Writeback (!) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));


      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
16797
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide LDMDB
   INITIAL_INSN found at INITIAL_INSN_ADDR.  The decrement-before
   load-multiple is split into two smaller LDMs; the exact sequence
   depends on whether writeback is requested and whether PC or the base
   register Rn appear in the register list.  */
static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Fields decoded from the original T1 LDMDB encoding.  */
  int wback = (initial_insn & 0x00200000) >> 21;	/* Writeback (!) bit.  */
  int ri, rn = (initial_insn & 0x000f0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  Each arm of the if-chain below covers
     one combination of (wback, restore_pc, restore_rn).  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
17043
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide VLDM
   INITIAL_INSN found at INITIAL_INSN_ADDR.  The FP load-multiple is
   split into chunks of at most 8 words, with a compensating SUB on the
   base register for the no-writeback IA form, then a branch back to
   the instruction following the original one.  */
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field: number of 32-bit words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks but the last load 8 words; the last one
		 loads the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
17164
17165 static void
17166 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17167 bfd * output_bfd,
17168 const insn32 wrong_insn,
17169 const bfd_byte *const wrong_insn_addr,
17170 bfd_byte *const stub_contents)
17171 {
17172 if (is_thumb2_ldmia (wrong_insn))
17173 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17174 wrong_insn, wrong_insn_addr,
17175 stub_contents);
17176 else if (is_thumb2_ldmdb (wrong_insn))
17177 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17178 wrong_insn, wrong_insn_addr,
17179 stub_contents);
17180 else if (is_thumb2_vldm (wrong_insn))
17181 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17182 wrong_insn, wrong_insn_addr,
17183 stub_contents);
17184 }
17185
17186 /* End of stm32l4xx work-around. */
17187
17188
17189 static void
17190 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17191 asection *output_sec, Elf_Internal_Rela *rel)
17192 {
17193 BFD_ASSERT (output_sec && rel);
17194 struct bfd_elf_section_reloc_data *output_reldata;
17195 struct elf32_arm_link_hash_table *htab;
17196 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17197 Elf_Internal_Shdr *rel_hdr;
17198
17199
17200 if (oesd->rel.hdr)
17201 {
17202 rel_hdr = oesd->rel.hdr;
17203 output_reldata = &(oesd->rel);
17204 }
17205 else if (oesd->rela.hdr)
17206 {
17207 rel_hdr = oesd->rela.hdr;
17208 output_reldata = &(oesd->rela);
17209 }
17210 else
17211 {
17212 abort ();
17213 }
17214
17215 bfd_byte *erel = rel_hdr->contents;
17216 erel += output_reldata->count * rel_hdr->sh_entsize;
17217 htab = elf32_arm_hash_table (info);
17218 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17219 output_reldata->count++;
17220 }
17221
17222 /* Do code byteswapping. Return FALSE afterwards so that the section is
17223 written out as normal. */
17224
17225 static bfd_boolean
17226 elf32_arm_write_section (bfd *output_bfd,
17227 struct bfd_link_info *link_info,
17228 asection *sec,
17229 bfd_byte *contents)
17230 {
17231 unsigned int mapcount, errcount;
17232 _arm_elf_section_data *arm_data;
17233 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17234 elf32_arm_section_map *map;
17235 elf32_vfp11_erratum_list *errnode;
17236 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17237 bfd_vma ptr;
17238 bfd_vma end;
17239 bfd_vma offset = sec->output_section->vma + sec->output_offset;
17240 bfd_byte tmp;
17241 unsigned int i;
17242
17243 if (globals == NULL)
17244 return FALSE;
17245
17246 /* If this section has not been allocated an _arm_elf_section_data
17247 structure then we cannot record anything. */
17248 arm_data = get_arm_elf_section_data (sec);
17249 if (arm_data == NULL)
17250 return FALSE;
17251
17252 mapcount = arm_data->mapcount;
17253 map = arm_data->map;
17254 errcount = arm_data->erratumcount;
17255
17256 if (errcount != 0)
17257 {
17258 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17259
17260 for (errnode = arm_data->erratumlist; errnode != 0;
17261 errnode = errnode->next)
17262 {
17263 bfd_vma target = errnode->vma - offset;
17264
17265 switch (errnode->type)
17266 {
17267 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17268 {
17269 bfd_vma branch_to_veneer;
17270 /* Original condition code of instruction, plus bit mask for
17271 ARM B instruction. */
17272 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17273 | 0x0a000000;
17274
17275 /* The instruction is before the label. */
17276 target -= 4;
17277
17278 /* Above offset included in -4 below. */
17279 branch_to_veneer = errnode->u.b.veneer->vma
17280 - errnode->vma - 4;
17281
17282 if ((signed) branch_to_veneer < -(1 << 25)
17283 || (signed) branch_to_veneer >= (1 << 25))
17284 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17285 "range"), output_bfd);
17286
17287 insn |= (branch_to_veneer >> 2) & 0xffffff;
17288 contents[endianflip ^ target] = insn & 0xff;
17289 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17290 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17291 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17292 }
17293 break;
17294
17295 case VFP11_ERRATUM_ARM_VENEER:
17296 {
17297 bfd_vma branch_from_veneer;
17298 unsigned int insn;
17299
17300 /* Take size of veneer into account. */
17301 branch_from_veneer = errnode->u.v.branch->vma
17302 - errnode->vma - 12;
17303
17304 if ((signed) branch_from_veneer < -(1 << 25)
17305 || (signed) branch_from_veneer >= (1 << 25))
17306 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17307 "range"), output_bfd);
17308
17309 /* Original instruction. */
17310 insn = errnode->u.v.branch->u.b.vfp_insn;
17311 contents[endianflip ^ target] = insn & 0xff;
17312 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17313 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17314 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17315
17316 /* Branch back to insn after original insn. */
17317 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17318 contents[endianflip ^ (target + 4)] = insn & 0xff;
17319 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17320 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17321 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17322 }
17323 break;
17324
17325 default:
17326 abort ();
17327 }
17328 }
17329 }
17330
17331 if (arm_data->stm32l4xx_erratumcount != 0)
17332 {
17333 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17334 stm32l4xx_errnode != 0;
17335 stm32l4xx_errnode = stm32l4xx_errnode->next)
17336 {
17337 bfd_vma target = stm32l4xx_errnode->vma - offset;
17338
17339 switch (stm32l4xx_errnode->type)
17340 {
17341 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17342 {
17343 unsigned int insn;
17344 bfd_vma branch_to_veneer =
17345 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17346
17347 if ((signed) branch_to_veneer < -(1 << 24)
17348 || (signed) branch_to_veneer >= (1 << 24))
17349 {
17350 bfd_vma out_of_range =
17351 ((signed) branch_to_veneer < -(1 << 24)) ?
17352 - branch_to_veneer - (1 << 24) :
17353 ((signed) branch_to_veneer >= (1 << 24)) ?
17354 branch_to_veneer - (1 << 24) : 0;
17355
17356 (*_bfd_error_handler)
17357 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17358 "Jump out of range by %ld bytes. "
17359 "Cannot encode branch instruction. "),
17360 output_bfd,
17361 (long) (stm32l4xx_errnode->vma - 4),
17362 out_of_range);
17363 continue;
17364 }
17365
17366 insn = create_instruction_branch_absolute
17367 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17368
17369 /* The instruction is before the label. */
17370 target -= 4;
17371
17372 put_thumb2_insn (globals, output_bfd,
17373 (bfd_vma) insn, contents + target);
17374 }
17375 break;
17376
17377 case STM32L4XX_ERRATUM_VENEER:
17378 {
17379 bfd_byte * veneer;
17380 bfd_byte * veneer_r;
17381 unsigned int insn;
17382
17383 veneer = contents + target;
17384 veneer_r = veneer
17385 + stm32l4xx_errnode->u.b.veneer->vma
17386 - stm32l4xx_errnode->vma - 4;
17387
17388 if ((signed) (veneer_r - veneer -
17389 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17390 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17391 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17392 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17393 || (signed) (veneer_r - veneer) >= (1 << 24))
17394 {
17395 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17396 "veneer."), output_bfd);
17397 continue;
17398 }
17399
17400 /* Original instruction. */
17401 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17402
17403 stm32l4xx_create_replacing_stub
17404 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17405 }
17406 break;
17407
17408 default:
17409 abort ();
17410 }
17411 }
17412 }
17413
17414 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17415 {
17416 arm_unwind_table_edit *edit_node
17417 = arm_data->u.exidx.unwind_edit_list;
17418 /* Now, sec->size is the size of the section we will write. The original
17419 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17420 markers) was sec->rawsize. (This isn't the case if we perform no
17421 edits, then rawsize will be zero and we should use size). */
17422 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17423 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17424 unsigned int in_index, out_index;
17425 bfd_vma add_to_offsets = 0;
17426
17427 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17428 {
17429 if (edit_node)
17430 {
17431 unsigned int edit_index = edit_node->index;
17432
17433 if (in_index < edit_index && in_index * 8 < input_size)
17434 {
17435 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17436 contents + in_index * 8, add_to_offsets);
17437 out_index++;
17438 in_index++;
17439 }
17440 else if (in_index == edit_index
17441 || (in_index * 8 >= input_size
17442 && edit_index == UINT_MAX))
17443 {
17444 switch (edit_node->type)
17445 {
17446 case DELETE_EXIDX_ENTRY:
17447 in_index++;
17448 add_to_offsets += 8;
17449 break;
17450
17451 case INSERT_EXIDX_CANTUNWIND_AT_END:
17452 {
17453 asection *text_sec = edit_node->linked_section;
17454 bfd_vma text_offset = text_sec->output_section->vma
17455 + text_sec->output_offset
17456 + text_sec->size;
17457 bfd_vma exidx_offset = offset + out_index * 8;
17458 unsigned long prel31_offset;
17459
17460 /* Note: this is meant to be equivalent to an
17461 R_ARM_PREL31 relocation. These synthetic
17462 EXIDX_CANTUNWIND markers are not relocated by the
17463 usual BFD method. */
17464 prel31_offset = (text_offset - exidx_offset)
17465 & 0x7ffffffful;
17466 if (bfd_link_relocatable (link_info))
17467 {
17468 /* Here relocation for new EXIDX_CANTUNWIND is
17469 created, so there is no need to
17470 adjust offset by hand. */
17471 prel31_offset = text_sec->output_offset
17472 + text_sec->size;
17473
17474 /* New relocation entity. */
17475 asection *text_out = text_sec->output_section;
17476 Elf_Internal_Rela rel;
17477 rel.r_addend = 0;
17478 rel.r_offset = exidx_offset;
17479 rel.r_info = ELF32_R_INFO (text_out->target_index,
17480 R_ARM_PREL31);
17481
17482 elf32_arm_add_relocation (output_bfd, link_info,
17483 sec->output_section,
17484 &rel);
17485 }
17486
17487 /* First address we can't unwind. */
17488 bfd_put_32 (output_bfd, prel31_offset,
17489 &edited_contents[out_index * 8]);
17490
17491 /* Code for EXIDX_CANTUNWIND. */
17492 bfd_put_32 (output_bfd, 0x1,
17493 &edited_contents[out_index * 8 + 4]);
17494
17495 out_index++;
17496 add_to_offsets -= 8;
17497 }
17498 break;
17499 }
17500
17501 edit_node = edit_node->next;
17502 }
17503 }
17504 else
17505 {
17506 /* No more edits, copy remaining entries verbatim. */
17507 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17508 contents + in_index * 8, add_to_offsets);
17509 out_index++;
17510 in_index++;
17511 }
17512 }
17513
17514 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17515 bfd_set_section_contents (output_bfd, sec->output_section,
17516 edited_contents,
17517 (file_ptr) sec->output_offset, sec->size);
17518
17519 return TRUE;
17520 }
17521
17522 /* Fix code to point to Cortex-A8 erratum stubs. */
17523 if (globals->fix_cortex_a8)
17524 {
17525 struct a8_branch_to_stub_data data;
17526
17527 data.writing_section = sec;
17528 data.contents = contents;
17529
17530 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17531 & data);
17532 }
17533
17534 if (mapcount == 0)
17535 return FALSE;
17536
17537 if (globals->byteswap_code)
17538 {
17539 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17540
17541 ptr = map[0].vma;
17542 for (i = 0; i < mapcount; i++)
17543 {
17544 if (i == mapcount - 1)
17545 end = sec->size;
17546 else
17547 end = map[i + 1].vma;
17548
17549 switch (map[i].type)
17550 {
17551 case 'a':
17552 /* Byte swap code words. */
17553 while (ptr + 3 < end)
17554 {
17555 tmp = contents[ptr];
17556 contents[ptr] = contents[ptr + 3];
17557 contents[ptr + 3] = tmp;
17558 tmp = contents[ptr + 1];
17559 contents[ptr + 1] = contents[ptr + 2];
17560 contents[ptr + 2] = tmp;
17561 ptr += 4;
17562 }
17563 break;
17564
17565 case 't':
17566 /* Byte swap code halfwords. */
17567 while (ptr + 1 < end)
17568 {
17569 tmp = contents[ptr];
17570 contents[ptr] = contents[ptr + 1];
17571 contents[ptr + 1] = tmp;
17572 ptr += 2;
17573 }
17574 break;
17575
17576 case 'd':
17577 /* Leave data alone. */
17578 break;
17579 }
17580 ptr = end;
17581 }
17582 }
17583
17584 free (map);
17585 arm_data->mapcount = -1;
17586 arm_data->mapsize = 0;
17587 arm_data->map = NULL;
17588
17589 return FALSE;
17590 }
17591
17592 /* Mangle thumb function symbols as we read them in. */
17593
17594 static bfd_boolean
17595 elf32_arm_swap_symbol_in (bfd * abfd,
17596 const void *psrc,
17597 const void *pshn,
17598 Elf_Internal_Sym *dst)
17599 {
17600 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17601 return FALSE;
17602 dst->st_target_internal = 0;
17603
17604 /* New EABI objects mark thumb function symbols by setting the low bit of
17605 the address. */
17606 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17607 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17608 {
17609 if (dst->st_value & 1)
17610 {
17611 dst->st_value &= ~(bfd_vma) 1;
17612 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17613 ST_BRANCH_TO_THUMB);
17614 }
17615 else
17616 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17617 }
17618 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17619 {
17620 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17621 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17622 }
17623 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17624 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17625 else
17626 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17627
17628 return TRUE;
17629 }
17630
17631
17632 /* Mangle thumb function symbols as we write them out. */
17633
17634 static void
17635 elf32_arm_swap_symbol_out (bfd *abfd,
17636 const Elf_Internal_Sym *src,
17637 void *cdst,
17638 void *shndx)
17639 {
17640 Elf_Internal_Sym newsym;
17641
17642 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17643 of the address set, as per the new EABI. We do this unconditionally
17644 because objcopy does not set the elf header flags until after
17645 it writes out the symbol table. */
17646 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17647 {
17648 newsym = *src;
17649 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17650 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17651 if (newsym.st_shndx != SHN_UNDEF)
17652 {
17653 /* Do this only for defined symbols. At link type, the static
17654 linker will simulate the work of dynamic linker of resolving
17655 symbols and will carry over the thumbness of found symbols to
17656 the output symbol table. It's not clear how it happens, but
17657 the thumbness of undefined symbols can well be different at
17658 runtime, and writing '1' for them will be confusing for users
17659 and possibly for dynamic linker itself.
17660 */
17661 newsym.st_value |= 1;
17662 }
17663
17664 src = &newsym;
17665 }
17666 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17667 }
17668
17669 /* Add the PT_ARM_EXIDX program header. */
17670
17671 static bfd_boolean
17672 elf32_arm_modify_segment_map (bfd *abfd,
17673 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17674 {
17675 struct elf_segment_map *m;
17676 asection *sec;
17677
17678 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17679 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17680 {
17681 /* If there is already a PT_ARM_EXIDX header, then we do not
17682 want to add another one. This situation arises when running
17683 "strip"; the input binary already has the header. */
17684 m = elf_seg_map (abfd);
17685 while (m && m->p_type != PT_ARM_EXIDX)
17686 m = m->next;
17687 if (!m)
17688 {
17689 m = (struct elf_segment_map *)
17690 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17691 if (m == NULL)
17692 return FALSE;
17693 m->p_type = PT_ARM_EXIDX;
17694 m->count = 1;
17695 m->sections[0] = sec;
17696
17697 m->next = elf_seg_map (abfd);
17698 elf_seg_map (abfd) = m;
17699 }
17700 }
17701
17702 return TRUE;
17703 }
17704
17705 /* We may add a PT_ARM_EXIDX program header. */
17706
17707 static int
17708 elf32_arm_additional_program_headers (bfd *abfd,
17709 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17710 {
17711 asection *sec;
17712
17713 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17714 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17715 return 1;
17716 else
17717 return 0;
17718 }
17719
17720 /* Hook called by the linker routine which adds symbols from an object
17721 file. */
17722
17723 static bfd_boolean
17724 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17725 Elf_Internal_Sym *sym, const char **namep,
17726 flagword *flagsp, asection **secp, bfd_vma *valp)
17727 {
17728 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17729 && (abfd->flags & DYNAMIC) == 0
17730 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17731 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17732
17733 if (elf32_arm_hash_table (info) == NULL)
17734 return FALSE;
17735
17736 if (elf32_arm_hash_table (info)->vxworks_p
17737 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17738 flagsp, secp, valp))
17739 return FALSE;
17740
17741 return TRUE;
17742 }
17743
17744 /* We use this to override swap_symbol_in and swap_symbol_out. */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Size of a .hash section entry.  */
  1,		/* Internal relocs per external reloc (int_rels_per_ext_rel;
		   read by elf32_arm_get_synthetic_symtab below).  */
  32, 2,	/* Arch size in bits; log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  /* The only ARM-specific overrides: symbol swap-in/out handle the
     Thumb bit (see elf32_arm_swap_symbol_in/out).  */
  elf32_arm_swap_symbol_in,
  elf32_arm_swap_symbol_out,
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
17774
17775 static bfd_vma
17776 read_code32 (const bfd *abfd, const bfd_byte *addr)
17777 {
17778 /* V7 BE8 code is always little endian. */
17779 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17780 return bfd_getl32 (addr);
17781
17782 return bfd_get_32 (abfd, addr);
17783 }
17784
17785 static bfd_vma
17786 read_code16 (const bfd *abfd, const bfd_byte *addr)
17787 {
17788 /* V7 BE8 code is always little endian. */
17789 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17790 return bfd_getl16 (addr);
17791
17792 return bfd_get_16 (abfd, addr);
17793 }
17794
17795 /* Return size of plt0 entry starting at ADDR
17796 or (bfd_vma) -1 if size can not be determined. */
17797
17798 static bfd_vma
17799 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17800 {
17801 bfd_vma first_word;
17802 bfd_vma plt0_size;
17803
17804 first_word = read_code32 (abfd, addr);
17805
17806 if (first_word == elf32_arm_plt0_entry[0])
17807 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17808 else if (first_word == elf32_thumb2_plt0_entry[0])
17809 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17810 else
17811 /* We don't yet handle this PLT format. */
17812 return (bfd_vma) -1;
17813
17814 return plt0_size;
17815 }
17816
17817 /* Return size of plt entry starting at offset OFFSET
17818 of plt section located at address START
17819 or (bfd_vma) -1 if size can not be determined. */
17820
static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  Recognise such a
     PLT by its Thumb2 header word and return the fixed entry size.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
      return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary: an ARM PLT entry may be preceded
     by a short Thumb-to-ARM stub, which is counted into the entry.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add (the entry's first ADD encodes a
     GOT displacement in its low byte, so mask it off before matching).  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* Note: this `else' attaches to whichever if/else-if chain the
     preprocessor kept above.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
17856
17857 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
17858
17859 static long
17860 elf32_arm_get_synthetic_symtab (bfd *abfd,
17861 long symcount ATTRIBUTE_UNUSED,
17862 asymbol **syms ATTRIBUTE_UNUSED,
17863 long dynsymcount,
17864 asymbol **dynsyms,
17865 asymbol **ret)
17866 {
17867 asection *relplt;
17868 asymbol *s;
17869 arelent *p;
17870 long count, i, n;
17871 size_t size;
17872 Elf_Internal_Shdr *hdr;
17873 char *names;
17874 asection *plt;
17875 bfd_vma offset;
17876 bfd_byte *data;
17877
17878 *ret = NULL;
17879
17880 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
17881 return 0;
17882
17883 if (dynsymcount <= 0)
17884 return 0;
17885
17886 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
17887 if (relplt == NULL)
17888 return 0;
17889
17890 hdr = &elf_section_data (relplt)->this_hdr;
17891 if (hdr->sh_link != elf_dynsymtab (abfd)
17892 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
17893 return 0;
17894
17895 plt = bfd_get_section_by_name (abfd, ".plt");
17896 if (plt == NULL)
17897 return 0;
17898
17899 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
17900 return -1;
17901
17902 data = plt->contents;
17903 if (data == NULL)
17904 {
17905 if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
17906 return -1;
17907 bfd_cache_section_contents((asection *) plt, data);
17908 }
17909
17910 count = relplt->size / hdr->sh_entsize;
17911 size = count * sizeof (asymbol);
17912 p = relplt->relocation;
17913 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
17914 {
17915 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
17916 if (p->addend != 0)
17917 size += sizeof ("+0x") - 1 + 8;
17918 }
17919
17920 s = *ret = (asymbol *) bfd_malloc (size);
17921 if (s == NULL)
17922 return -1;
17923
17924 offset = elf32_arm_plt0_size (abfd, data);
17925 if (offset == (bfd_vma) -1)
17926 return -1;
17927
17928 names = (char *) (s + count);
17929 p = relplt->relocation;
17930 n = 0;
17931 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
17932 {
17933 size_t len;
17934
17935 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
17936 if (plt_size == (bfd_vma) -1)
17937 break;
17938
17939 *s = **p->sym_ptr_ptr;
17940 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
17941 we are defining a symbol, ensure one of them is set. */
17942 if ((s->flags & BSF_LOCAL) == 0)
17943 s->flags |= BSF_GLOBAL;
17944 s->flags |= BSF_SYNTHETIC;
17945 s->section = plt;
17946 s->value = offset;
17947 s->name = names;
17948 s->udata.p = NULL;
17949 len = strlen ((*p->sym_ptr_ptr)->name);
17950 memcpy (names, (*p->sym_ptr_ptr)->name, len);
17951 names += len;
17952 if (p->addend != 0)
17953 {
17954 char buf[30], *a;
17955
17956 memcpy (names, "+0x", sizeof ("+0x") - 1);
17957 names += sizeof ("+0x") - 1;
17958 bfd_sprintf_vma (abfd, buf, p->addend);
17959 for (a = buf; *a == '0'; ++a)
17960 ;
17961 len = strlen (a);
17962 memcpy (names, a, len);
17963 names += len;
17964 }
17965 memcpy (names, "@plt", sizeof ("@plt"));
17966 names += sizeof ("@plt");
17967 ++s, ++n;
17968 offset += plt_size;
17969 }
17970
17971 return n;
17972 }
17973
17974 static bfd_boolean
17975 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
17976 {
17977 if (hdr->sh_flags & SHF_ARM_NOREAD)
17978 *flags |= SEC_ELF_NOREAD;
17979 return TRUE;
17980 }
17981
17982 static flagword
17983 elf32_arm_lookup_section_flags (char *flag_name)
17984 {
17985 if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
17986 return SHF_ARM_NOREAD;
17987
17988 return SEC_NO_FLAGS;
17989 }
17990
17991 static unsigned int
17992 elf32_arm_count_additional_relocs (asection *sec)
17993 {
17994 struct _arm_elf_section_data *arm_data;
17995 arm_data = get_arm_elf_section_data (sec);
17996 return arm_data->additional_reloc_count;
17997 }
17998
17999 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18000 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
18001 FALSE otherwise. ISECTION is the best guess matching section from the
18002 input bfd IBFD, but it might be NULL. */
18003
static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I doubles as both loop counter and result: a non-zero value
	   at the end is the output section index chosen for sh_link.  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header matching the input
	       section's linked text section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Walk backwards looking for an allocated executable
	       PROGBITS section.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      /* Section types with no special fields to initialise here.  */
      break;
    }

  return FALSE;
}
18089
18090 #undef elf_backend_copy_special_section_fields
18091 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18092
18093 #define ELF_ARCH bfd_arch_arm
18094 #define ELF_TARGET_ID ARM_ELF_DATA
18095 #define ELF_MACHINE_CODE EM_ARM
18096 #ifdef __QNXTARGET__
18097 #define ELF_MAXPAGESIZE 0x1000
18098 #else
18099 #define ELF_MAXPAGESIZE 0x10000
18100 #endif
18101 #define ELF_MINPAGESIZE 0x1000
18102 #define ELF_COMMONPAGESIZE 0x1000
18103
18104 #define bfd_elf32_mkobject elf32_arm_mkobject
18105
18106 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18107 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18108 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18109 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18110 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18111 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18112 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18113 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18114 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18115 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18116 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18117 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18118 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18119
18120 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18121 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18122 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18123 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18124 #define elf_backend_check_relocs elf32_arm_check_relocs
18125 #define elf_backend_relocate_section elf32_arm_relocate_section
18126 #define elf_backend_write_section elf32_arm_write_section
18127 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18128 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18129 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18130 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18131 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18132 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18133 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18134 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18135 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18136 #define elf_backend_object_p elf32_arm_object_p
18137 #define elf_backend_fake_sections elf32_arm_fake_sections
18138 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18139 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18140 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18141 #define elf_backend_size_info elf32_arm_size_info
18142 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18143 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18144 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18145 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18146 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18147 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18148
18149 #define elf_backend_can_refcount 1
18150 #define elf_backend_can_gc_sections 1
18151 #define elf_backend_plt_readonly 1
18152 #define elf_backend_want_got_plt 1
18153 #define elf_backend_want_plt_sym 0
18154 #define elf_backend_may_use_rel_p 1
18155 #define elf_backend_may_use_rela_p 0
18156 #define elf_backend_default_use_rela_p 0
18157
18158 #define elf_backend_got_header_size 12
18159 #define elf_backend_extern_protected_data 1
18160
18161 #undef elf_backend_obj_attrs_vendor
18162 #define elf_backend_obj_attrs_vendor "aeabi"
18163 #undef elf_backend_obj_attrs_section
18164 #define elf_backend_obj_attrs_section ".ARM.attributes"
18165 #undef elf_backend_obj_attrs_arg_type
18166 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18167 #undef elf_backend_obj_attrs_section_type
18168 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18169 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18170 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18171
18172 #undef elf_backend_section_flags
18173 #define elf_backend_section_flags elf32_arm_section_flags
18174 #undef elf_backend_lookup_section_flags_hook
18175 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18176
18177 #include "elf32-target.h"
18178
18179 /* Native Client targets. */
18180
18181 #undef TARGET_LITTLE_SYM
18182 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18183 #undef TARGET_LITTLE_NAME
18184 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18185 #undef TARGET_BIG_SYM
18186 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18187 #undef TARGET_BIG_NAME
18188 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18189
18190 /* Like elf32_arm_link_hash_table_create -- but overrides
18191 appropriately for NaCl. */
18192
18193 static struct bfd_link_hash_table *
18194 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18195 {
18196 struct bfd_link_hash_table *ret;
18197
18198 ret = elf32_arm_link_hash_table_create (abfd);
18199 if (ret)
18200 {
18201 struct elf32_arm_link_hash_table *htab
18202 = (struct elf32_arm_link_hash_table *) ret;
18203
18204 htab->nacl_p = 1;
18205
18206 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18207 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18208 }
18209 return ret;
18210 }
18211
18212 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18213 really need to use elf32_arm_modify_segment_map. But we do it
18214 anyway just to reduce gratuitous differences with the stock ARM backend. */
18215
18216 static bfd_boolean
18217 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18218 {
18219 return (elf32_arm_modify_segment_map (abfd, info)
18220 && nacl_modify_segment_map (abfd, info));
18221 }
18222
/* Final-write hook for NaCl targets: apply the generic ARM processing
   first, then the NaCl-specific processing.  */
static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
18229
18230 static bfd_vma
18231 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18232 const arelent *rel ATTRIBUTE_UNUSED)
18233 {
18234 return plt->vma
18235 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18236 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18237 }
18238
/* Backend vector for the NaCl flavour: swap in the NaCl-aware hooks
   defined above, then instantiate the target via elf32-target.h.  */
#undef elf32_bed
#define elf32_bed elf32_arm_nacl_bed
#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create \
  elf32_arm_nacl_link_hash_table_create
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
#undef elf_backend_modify_program_headers
#define elf_backend_modify_program_headers nacl_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
#undef bfd_elf32_get_synthetic_symtab
#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
#undef elf_backend_copy_special_section_fields

/* NaCl uses the default (maximum) page sizes only.  */
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE


#include "elf32-target.h"

/* Reset to defaults. */
#undef elf_backend_plt_alignment
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing
#undef ELF_MINPAGESIZE
#define ELF_MINPAGESIZE 0x1000
#undef ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE 0x1000


/* VxWorks Targets. */

#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18286
18287 /* Like elf32_arm_link_hash_table_create -- but overrides
18288 appropriately for VxWorks. */
18289
18290 static struct bfd_link_hash_table *
18291 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18292 {
18293 struct bfd_link_hash_table *ret;
18294
18295 ret = elf32_arm_link_hash_table_create (abfd);
18296 if (ret)
18297 {
18298 struct elf32_arm_link_hash_table *htab
18299 = (struct elf32_arm_link_hash_table *) ret;
18300 htab->use_rel = 0;
18301 htab->vxworks_p = 1;
18302 }
18303 return ret;
18304 }
18305
/* Final write hook for VxWorks targets: run the generic ARM
   post-write fixups first, then the VxWorks-specific ones.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
18312
/* Backend vector for the VxWorks flavour: RELA relocations, PLT
   symbols wanted, 4K pages.  */
#undef elf32_bed
#define elf32_bed elf32_arm_vxworks_bed

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
#undef elf_backend_emit_relocs
#define elf_backend_emit_relocs elf_vxworks_emit_relocs

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 0
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 1
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 1
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x1000

#include "elf32-target.h"
18335
18336
18337 /* Merge backend specific data from an object file to the output
18338 object file when linking. */
18339
18340 static bfd_boolean
18341 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
18342 {
18343 flagword out_flags;
18344 flagword in_flags;
18345 bfd_boolean flags_compatible = TRUE;
18346 asection *sec;
18347
18348 /* Check if we have the same endianness. */
18349 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
18350 return FALSE;
18351
18352 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
18353 return TRUE;
18354
18355 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
18356 return FALSE;
18357
18358 /* The input BFD must have had its flags initialised. */
18359 /* The following seems bogus to me -- The flags are initialized in
18360 the assembler but I don't think an elf_flags_init field is
18361 written into the object. */
18362 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18363
18364 in_flags = elf_elfheader (ibfd)->e_flags;
18365 out_flags = elf_elfheader (obfd)->e_flags;
18366
18367 /* In theory there is no reason why we couldn't handle this. However
18368 in practice it isn't even close to working and there is no real
18369 reason to want it. */
18370 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
18371 && !(ibfd->flags & DYNAMIC)
18372 && (in_flags & EF_ARM_BE8))
18373 {
18374 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18375 ibfd);
18376 return FALSE;
18377 }
18378
18379 if (!elf_flags_init (obfd))
18380 {
18381 /* If the input is the default architecture and had the default
18382 flags then do not bother setting the flags for the output
18383 architecture, instead allow future merges to do this. If no
18384 future merges ever set these flags then they will retain their
18385 uninitialised values, which surprise surprise, correspond
18386 to the default values. */
18387 if (bfd_get_arch_info (ibfd)->the_default
18388 && elf_elfheader (ibfd)->e_flags == 0)
18389 return TRUE;
18390
18391 elf_flags_init (obfd) = TRUE;
18392 elf_elfheader (obfd)->e_flags = in_flags;
18393
18394 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
18395 && bfd_get_arch_info (obfd)->the_default)
18396 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
18397
18398 return TRUE;
18399 }
18400
18401 /* Determine what should happen if the input ARM architecture
18402 does not match the output ARM architecture. */
18403 if (! bfd_arm_merge_machines (ibfd, obfd))
18404 return FALSE;
18405
18406 /* Identical flags must be compatible. */
18407 if (in_flags == out_flags)
18408 return TRUE;
18409
18410 /* Check to see if the input BFD actually contains any sections. If
18411 not, its flags may not have been initialised either, but it
18412 cannot actually cause any incompatiblity. Do not short-circuit
18413 dynamic objects; their section list may be emptied by
18414 elf_link_add_object_symbols.
18415
18416 Also check to see if there are no code sections in the input.
18417 In this case there is no need to check for code specific flags.
18418 XXX - do we need to worry about floating-point format compatability
18419 in data sections ? */
18420 if (!(ibfd->flags & DYNAMIC))
18421 {
18422 bfd_boolean null_input_bfd = TRUE;
18423 bfd_boolean only_data_sections = TRUE;
18424
18425 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
18426 {
18427 /* Ignore synthetic glue sections. */
18428 if (strcmp (sec->name, ".glue_7")
18429 && strcmp (sec->name, ".glue_7t"))
18430 {
18431 if ((bfd_get_section_flags (ibfd, sec)
18432 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18433 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18434 only_data_sections = FALSE;
18435
18436 null_input_bfd = FALSE;
18437 break;
18438 }
18439 }
18440
18441 if (null_input_bfd || only_data_sections)
18442 return TRUE;
18443 }
18444
18445 /* Complain about various flag mismatches. */
18446 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
18447 EF_ARM_EABI_VERSION (out_flags)))
18448 {
18449 _bfd_error_handler
18450 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18451 ibfd, obfd,
18452 (in_flags & EF_ARM_EABIMASK) >> 24,
18453 (out_flags & EF_ARM_EABIMASK) >> 24);
18454 return FALSE;
18455 }
18456
18457 /* Not sure what needs to be checked for EABI versions >= 1. */
18458 /* VxWorks libraries do not use these flags. */
18459 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
18460 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
18461 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
18462 {
18463 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
18464 {
18465 _bfd_error_handler
18466 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18467 ibfd, obfd,
18468 in_flags & EF_ARM_APCS_26 ? 26 : 32,
18469 out_flags & EF_ARM_APCS_26 ? 26 : 32);
18470 flags_compatible = FALSE;
18471 }
18472
18473 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
18474 {
18475 if (in_flags & EF_ARM_APCS_FLOAT)
18476 _bfd_error_handler
18477 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18478 ibfd, obfd);
18479 else
18480 _bfd_error_handler
18481 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18482 ibfd, obfd);
18483
18484 flags_compatible = FALSE;
18485 }
18486
18487 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
18488 {
18489 if (in_flags & EF_ARM_VFP_FLOAT)
18490 _bfd_error_handler
18491 (_("error: %B uses VFP instructions, whereas %B does not"),
18492 ibfd, obfd);
18493 else
18494 _bfd_error_handler
18495 (_("error: %B uses FPA instructions, whereas %B does not"),
18496 ibfd, obfd);
18497
18498 flags_compatible = FALSE;
18499 }
18500
18501 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
18502 {
18503 if (in_flags & EF_ARM_MAVERICK_FLOAT)
18504 _bfd_error_handler
18505 (_("error: %B uses Maverick instructions, whereas %B does not"),
18506 ibfd, obfd);
18507 else
18508 _bfd_error_handler
18509 (_("error: %B does not use Maverick instructions, whereas %B does"),
18510 ibfd, obfd);
18511
18512 flags_compatible = FALSE;
18513 }
18514
18515 #ifdef EF_ARM_SOFT_FLOAT
18516 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
18517 {
18518 /* We can allow interworking between code that is VFP format
18519 layout, and uses either soft float or integer regs for
18520 passing floating point arguments and results. We already
18521 know that the APCS_FLOAT flags match; similarly for VFP
18522 flags. */
18523 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
18524 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
18525 {
18526 if (in_flags & EF_ARM_SOFT_FLOAT)
18527 _bfd_error_handler
18528 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18529 ibfd, obfd);
18530 else
18531 _bfd_error_handler
18532 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18533 ibfd, obfd);
18534
18535 flags_compatible = FALSE;
18536 }
18537 }
18538 #endif
18539
18540 /* Interworking mismatch is only a warning. */
18541 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
18542 {
18543 if (in_flags & EF_ARM_INTERWORK)
18544 {
18545 _bfd_error_handler
18546 (_("Warning: %B supports interworking, whereas %B does not"),
18547 ibfd, obfd);
18548 }
18549 else
18550 {
18551 _bfd_error_handler
18552 (_("Warning: %B does not support interworking, whereas %B does"),
18553 ibfd, obfd);
18554 }
18555 }
18556 }
18557
18558 return flags_compatible;
18559 }
18560
18561
/* Symbian OS Targets. */

/* Target vector symbols and names for the Symbian OS flavour.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
#undef TARGET_BIG_SYM
#define TARGET_BIG_SYM arm_elf32_symbian_be_vec
#undef TARGET_BIG_NAME
#define TARGET_BIG_NAME "elf32-bigarm-symbian"
18572
18573 /* Like elf32_arm_link_hash_table_create -- but overrides
18574 appropriately for Symbian OS. */
18575
18576 static struct bfd_link_hash_table *
18577 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18578 {
18579 struct bfd_link_hash_table *ret;
18580
18581 ret = elf32_arm_link_hash_table_create (abfd);
18582 if (ret)
18583 {
18584 struct elf32_arm_link_hash_table *htab
18585 = (struct elf32_arm_link_hash_table *)ret;
18586 /* There is no PLT header for Symbian OS. */
18587 htab->plt_header_size = 0;
18588 /* The PLT entries are each one instruction and one word. */
18589 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18590 htab->symbian_p = 1;
18591 /* Symbian uses armv5t or above, so use_blx is always true. */
18592 htab->use_blx = 1;
18593 htab->root.is_relocatable_executable = 1;
18594 }
18595 return ret;
18596 }
18597
/* Sections that need non-default handling on Symbian OS.  Each entry
   gives a name prefix (and its length, via STRING_COMMA_LEN), a
   suffix length of 0, the required section type, and the required
   section attributes.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Terminator.  */
  { NULL, 0, 0, 0, 0 }
};
18618
/* Begin-write hook for Symbian OS: clear D_PAGED (see below for why)
   before delegating to the generic ARM begin-write processing.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
18634
18635 static bfd_boolean
18636 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18637 struct bfd_link_info *info)
18638 {
18639 struct elf_segment_map *m;
18640 asection *dynsec;
18641
18642 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18643 segment. However, because the .dynamic section is not marked
18644 with SEC_LOAD, the generic ELF code will not create such a
18645 segment. */
18646 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18647 if (dynsec)
18648 {
18649 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18650 if (m->p_type == PT_DYNAMIC)
18651 break;
18652
18653 if (m == NULL)
18654 {
18655 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18656 m->next = elf_seg_map (abfd);
18657 elf_seg_map (abfd) = m;
18658 }
18659 }
18660
18661 /* Also call the generic arm routine. */
18662 return elf32_arm_modify_segment_map (abfd, info);
18663 }
18664
18665 /* Return address for Ith PLT stub in section PLT, for relocation REL
18666 or (bfd_vma) -1 if it should not be included. */
18667
18668 static bfd_vma
18669 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18670 const arelent *rel ATTRIBUTE_UNUSED)
18671 {
18672 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18673 }
18674
/* Backend vector for the Symbian OS flavour: no GOT header, no
   .got.plt, REL relocations, non-allocated dynamic sections.  */
#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header. */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section. */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

#include "elf32-target.h"