1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
57
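/* Illustrative sketch, not part of the original source: how the REL/RELA
   helper macros above are typically used together.  The names "htab" and
   "example_reloc_helpers" are hypothetical.  */
#if 0
static void
example_reloc_helpers (struct elf32_arm_link_hash_table *htab)
{
  /* ".rel.text" on REL-style targets, ".rela.text" on RELA-style ones.  */
  const char *name = RELOC_SECTION (htab, ".text");

  /* Size in bytes of one external relocation entry of that flavour.  */
  bfd_size_type entsize = RELOC_SIZE (htab);

  /* SWAP_RELOC_IN (htab) and SWAP_RELOC_OUT (htab) likewise select the
     matching bfd_elf32_swap_reloc{,a}_{in,out} routine.  */
  (void) name;
  (void) entsize;
}
#endif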
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_TLS_DESC, /* type */
265 0, /* rightshift */
266 2, /* size (0 = byte, 1 = short, 2 = long) */
267 32, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_bitfield,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_TLS_DESC", /* name */
273 FALSE, /* partial_inplace */
274 0xffffffff, /* src_mask */
275 0xffffffff, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
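  /* For example (an illustrative note, not in the original source): a
     computed offset of -100 does not fit ADDW's unsigned 12-bit
     immediate, but the same address can be reached by rewriting the
     instruction as SUBW with an immediate of 100.  */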
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1356 0, /* rightshift */
1357 2, /* size (0 = byte, 1 = short, 2 = long) */
1358 32, /* bitsize */
1359 FALSE, /* pc_relative */
1360 0, /* bitpos */
1361 complain_overflow_bitfield,/* complain_on_overflow */
1362 NULL, /* special_function */
1363 "R_ARM_TLS_GOTDESC", /* name */
1364 TRUE, /* partial_inplace */
1365 0xffffffff, /* src_mask */
1366 0xffffffff, /* dst_mask */
1367 FALSE), /* pcrel_offset */
1368
1369 HOWTO (R_ARM_TLS_CALL, /* type */
1370 0, /* rightshift */
1371 2, /* size (0 = byte, 1 = short, 2 = long) */
1372 24, /* bitsize */
1373 FALSE, /* pc_relative */
1374 0, /* bitpos */
1375 complain_overflow_dont,/* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1377 "R_ARM_TLS_CALL", /* name */
1378 FALSE, /* partial_inplace */
1379 0x00ffffff, /* src_mask */
1380 0x00ffffff, /* dst_mask */
1381 FALSE), /* pcrel_offset */
1382
1383 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1384 0, /* rightshift */
1385 2, /* size (0 = byte, 1 = short, 2 = long) */
1386 0, /* bitsize */
1387 FALSE, /* pc_relative */
1388 0, /* bitpos */
1389 complain_overflow_bitfield,/* complain_on_overflow */
1390 bfd_elf_generic_reloc, /* special_function */
1391 "R_ARM_TLS_DESCSEQ", /* name */
1392 FALSE, /* partial_inplace */
1393 0x00000000, /* src_mask */
1394 0x00000000, /* dst_mask */
1395 FALSE), /* pcrel_offset */
1396
1397 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1398 0, /* rightshift */
1399 2, /* size (0 = byte, 1 = short, 2 = long) */
1400 24, /* bitsize */
1401 FALSE, /* pc_relative */
1402 0, /* bitpos */
1403 complain_overflow_dont,/* complain_on_overflow */
1404 bfd_elf_generic_reloc, /* special_function */
1405 "R_ARM_THM_TLS_CALL", /* name */
1406 FALSE, /* partial_inplace */
1407 0x07ff07ff, /* src_mask */
1408 0x07ff07ff, /* dst_mask */
1409 FALSE), /* pcrel_offset */
1410
1411 HOWTO (R_ARM_PLT32_ABS, /* type */
1412 0, /* rightshift */
1413 2, /* size (0 = byte, 1 = short, 2 = long) */
1414 32, /* bitsize */
1415 FALSE, /* pc_relative */
1416 0, /* bitpos */
1417 complain_overflow_dont,/* complain_on_overflow */
1418 bfd_elf_generic_reloc, /* special_function */
1419 "R_ARM_PLT32_ABS", /* name */
1420 FALSE, /* partial_inplace */
1421 0xffffffff, /* src_mask */
1422 0xffffffff, /* dst_mask */
1423 FALSE), /* pcrel_offset */
1424
1425 HOWTO (R_ARM_GOT_ABS, /* type */
1426 0, /* rightshift */
1427 2, /* size (0 = byte, 1 = short, 2 = long) */
1428 32, /* bitsize */
1429 FALSE, /* pc_relative */
1430 0, /* bitpos */
1431 complain_overflow_dont,/* complain_on_overflow */
1432 bfd_elf_generic_reloc, /* special_function */
1433 "R_ARM_GOT_ABS", /* name */
1434 FALSE, /* partial_inplace */
1435 0xffffffff, /* src_mask */
1436 0xffffffff, /* dst_mask */
1437 FALSE), /* pcrel_offset */
1438
1439 HOWTO (R_ARM_GOT_PREL, /* type */
1440 0, /* rightshift */
1441 2, /* size (0 = byte, 1 = short, 2 = long) */
1442 32, /* bitsize */
1443 TRUE, /* pc_relative */
1444 0, /* bitpos */
1445 complain_overflow_dont, /* complain_on_overflow */
1446 bfd_elf_generic_reloc, /* special_function */
1447 "R_ARM_GOT_PREL", /* name */
1448 FALSE, /* partial_inplace */
1449 0xffffffff, /* src_mask */
1450 0xffffffff, /* dst_mask */
1451 TRUE), /* pcrel_offset */
1452
1453 HOWTO (R_ARM_GOT_BREL12, /* type */
1454 0, /* rightshift */
1455 2, /* size (0 = byte, 1 = short, 2 = long) */
1456 12, /* bitsize */
1457 FALSE, /* pc_relative */
1458 0, /* bitpos */
1459 complain_overflow_bitfield,/* complain_on_overflow */
1460 bfd_elf_generic_reloc, /* special_function */
1461 "R_ARM_GOT_BREL12", /* name */
1462 FALSE, /* partial_inplace */
1463 0x00000fff, /* src_mask */
1464 0x00000fff, /* dst_mask */
1465 FALSE), /* pcrel_offset */
1466
1467 HOWTO (R_ARM_GOTOFF12, /* type */
1468 0, /* rightshift */
1469 2, /* size (0 = byte, 1 = short, 2 = long) */
1470 12, /* bitsize */
1471 FALSE, /* pc_relative */
1472 0, /* bitpos */
1473 complain_overflow_bitfield,/* complain_on_overflow */
1474 bfd_elf_generic_reloc, /* special_function */
1475 "R_ARM_GOTOFF12", /* name */
1476 FALSE, /* partial_inplace */
1477 0x00000fff, /* src_mask */
1478 0x00000fff, /* dst_mask */
1479 FALSE), /* pcrel_offset */
1480
1481 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1482
1483 /* GNU extension to record C++ vtable member usage */
1484 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1485 0, /* rightshift */
1486 2, /* size (0 = byte, 1 = short, 2 = long) */
1487 0, /* bitsize */
1488 FALSE, /* pc_relative */
1489 0, /* bitpos */
1490 complain_overflow_dont, /* complain_on_overflow */
1491 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1492 "R_ARM_GNU_VTENTRY", /* name */
1493 FALSE, /* partial_inplace */
1494 0, /* src_mask */
1495 0, /* dst_mask */
1496 FALSE), /* pcrel_offset */
1497
1498 /* GNU extension to record C++ vtable hierarchy */
1499 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1500 0, /* rightshift */
1501 2, /* size (0 = byte, 1 = short, 2 = long) */
1502 0, /* bitsize */
1503 FALSE, /* pc_relative */
1504 0, /* bitpos */
1505 complain_overflow_dont, /* complain_on_overflow */
1506 NULL, /* special_function */
1507 "R_ARM_GNU_VTINHERIT", /* name */
1508 FALSE, /* partial_inplace */
1509 0, /* src_mask */
1510 0, /* dst_mask */
1511 FALSE), /* pcrel_offset */
1512
1513 HOWTO (R_ARM_THM_JUMP11, /* type */
1514 1, /* rightshift */
1515 1, /* size (0 = byte, 1 = short, 2 = long) */
1516 11, /* bitsize */
1517 TRUE, /* pc_relative */
1518 0, /* bitpos */
1519 complain_overflow_signed, /* complain_on_overflow */
1520 bfd_elf_generic_reloc, /* special_function */
1521 "R_ARM_THM_JUMP11", /* name */
1522 FALSE, /* partial_inplace */
1523 0x000007ff, /* src_mask */
1524 0x000007ff, /* dst_mask */
1525 TRUE), /* pcrel_offset */
1526
1527 HOWTO (R_ARM_THM_JUMP8, /* type */
1528 1, /* rightshift */
1529 1, /* size (0 = byte, 1 = short, 2 = long) */
1530 8, /* bitsize */
1531 TRUE, /* pc_relative */
1532 0, /* bitpos */
1533 complain_overflow_signed, /* complain_on_overflow */
1534 bfd_elf_generic_reloc, /* special_function */
1535 "R_ARM_THM_JUMP8", /* name */
1536 FALSE, /* partial_inplace */
1537 0x000000ff, /* src_mask */
1538 0x000000ff, /* dst_mask */
1539 TRUE), /* pcrel_offset */
1540
1541 /* TLS relocations */
1542 HOWTO (R_ARM_TLS_GD32, /* type */
1543 0, /* rightshift */
1544 2, /* size (0 = byte, 1 = short, 2 = long) */
1545 32, /* bitsize */
1546 FALSE, /* pc_relative */
1547 0, /* bitpos */
1548 complain_overflow_bitfield,/* complain_on_overflow */
1549 NULL, /* special_function */
1550 "R_ARM_TLS_GD32", /* name */
1551 TRUE, /* partial_inplace */
1552 0xffffffff, /* src_mask */
1553 0xffffffff, /* dst_mask */
1554 FALSE), /* pcrel_offset */
1555
1556 HOWTO (R_ARM_TLS_LDM32, /* type */
1557 0, /* rightshift */
1558 2, /* size (0 = byte, 1 = short, 2 = long) */
1559 32, /* bitsize */
1560 FALSE, /* pc_relative */
1561 0, /* bitpos */
1562 complain_overflow_bitfield,/* complain_on_overflow */
1563 bfd_elf_generic_reloc, /* special_function */
1564 "R_ARM_TLS_LDM32", /* name */
1565 TRUE, /* partial_inplace */
1566 0xffffffff, /* src_mask */
1567 0xffffffff, /* dst_mask */
1568 FALSE), /* pcrel_offset */
1569
1570 HOWTO (R_ARM_TLS_LDO32, /* type */
1571 0, /* rightshift */
1572 2, /* size (0 = byte, 1 = short, 2 = long) */
1573 32, /* bitsize */
1574 FALSE, /* pc_relative */
1575 0, /* bitpos */
1576 complain_overflow_bitfield,/* complain_on_overflow */
1577 bfd_elf_generic_reloc, /* special_function */
1578 "R_ARM_TLS_LDO32", /* name */
1579 TRUE, /* partial_inplace */
1580 0xffffffff, /* src_mask */
1581 0xffffffff, /* dst_mask */
1582 FALSE), /* pcrel_offset */
1583
1584 HOWTO (R_ARM_TLS_IE32, /* type */
1585 0, /* rightshift */
1586 2, /* size (0 = byte, 1 = short, 2 = long) */
1587 32, /* bitsize */
1588 FALSE, /* pc_relative */
1589 0, /* bitpos */
1590 complain_overflow_bitfield,/* complain_on_overflow */
1591 NULL, /* special_function */
1592 "R_ARM_TLS_IE32", /* name */
1593 TRUE, /* partial_inplace */
1594 0xffffffff, /* src_mask */
1595 0xffffffff, /* dst_mask */
1596 FALSE), /* pcrel_offset */
1597
1598 HOWTO (R_ARM_TLS_LE32, /* type */
1599 0, /* rightshift */
1600 2, /* size (0 = byte, 1 = short, 2 = long) */
1601 32, /* bitsize */
1602 FALSE, /* pc_relative */
1603 0, /* bitpos */
1604 complain_overflow_bitfield,/* complain_on_overflow */
1605 bfd_elf_generic_reloc, /* special_function */
1606 "R_ARM_TLS_LE32", /* name */
1607 TRUE, /* partial_inplace */
1608 0xffffffff, /* src_mask */
1609 0xffffffff, /* dst_mask */
1610 FALSE), /* pcrel_offset */
1611
1612 HOWTO (R_ARM_TLS_LDO12, /* type */
1613 0, /* rightshift */
1614 2, /* size (0 = byte, 1 = short, 2 = long) */
1615 12, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_bitfield,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_TLS_LDO12", /* name */
1621 FALSE, /* partial_inplace */
1622 0x00000fff, /* src_mask */
1623 0x00000fff, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_TLS_LE12, /* type */
1627 0, /* rightshift */
1628 2, /* size (0 = byte, 1 = short, 2 = long) */
1629 12, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_bitfield,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_TLS_LE12", /* name */
1635 FALSE, /* partial_inplace */
1636 0x00000fff, /* src_mask */
1637 0x00000fff, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_TLS_IE12GP, /* type */
1641 0, /* rightshift */
1642 2, /* size (0 = byte, 1 = short, 2 = long) */
1643 12, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_bitfield,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_TLS_IE12GP", /* name */
1649 FALSE, /* partial_inplace */
1650 0x00000fff, /* src_mask */
1651 0x00000fff, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 /* 112-127 private relocations. */
1655 EMPTY_HOWTO (112),
1656 EMPTY_HOWTO (113),
1657 EMPTY_HOWTO (114),
1658 EMPTY_HOWTO (115),
1659 EMPTY_HOWTO (116),
1660 EMPTY_HOWTO (117),
1661 EMPTY_HOWTO (118),
1662 EMPTY_HOWTO (119),
1663 EMPTY_HOWTO (120),
1664 EMPTY_HOWTO (121),
1665 EMPTY_HOWTO (122),
1666 EMPTY_HOWTO (123),
1667 EMPTY_HOWTO (124),
1668 EMPTY_HOWTO (125),
1669 EMPTY_HOWTO (126),
1670 EMPTY_HOWTO (127),
1671
1672 /* R_ARM_ME_TOO, obsolete. */
1673 EMPTY_HOWTO (128),
1674
1675 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1676 0, /* rightshift */
1677 1, /* size (0 = byte, 1 = short, 2 = long) */
1678 0, /* bitsize */
1679 FALSE, /* pc_relative */
1680 0, /* bitpos */
1681 complain_overflow_bitfield,/* complain_on_overflow */
1682 bfd_elf_generic_reloc, /* special_function */
1683 "R_ARM_THM_TLS_DESCSEQ",/* name */
1684 FALSE, /* partial_inplace */
1685 0x00000000, /* src_mask */
1686 0x00000000, /* dst_mask */
1687 FALSE), /* pcrel_offset */
1688 };
1689
1690 /* 160 onwards: */
1691 static reloc_howto_type elf32_arm_howto_table_2[1] =
1692 {
1693 HOWTO (R_ARM_IRELATIVE, /* type */
1694 0, /* rightshift */
1695 2, /* size (0 = byte, 1 = short, 2 = long) */
1696 32, /* bitsize */
1697 FALSE, /* pc_relative */
1698 0, /* bitpos */
1699 complain_overflow_bitfield,/* complain_on_overflow */
1700 bfd_elf_generic_reloc, /* special_function */
1701 "R_ARM_IRELATIVE", /* name */
1702 TRUE, /* partial_inplace */
1703 0xffffffff, /* src_mask */
1704 0xffffffff, /* dst_mask */
1705 FALSE) /* pcrel_offset */
1706 };
1707
1708 /* 249-255 extended, currently unused, relocations: */
1709 static reloc_howto_type elf32_arm_howto_table_3[4] =
1710 {
1711 HOWTO (R_ARM_RREL32, /* type */
1712 0, /* rightshift */
1713 0, /* size (0 = byte, 1 = short, 2 = long) */
1714 0, /* bitsize */
1715 FALSE, /* pc_relative */
1716 0, /* bitpos */
1717 complain_overflow_dont,/* complain_on_overflow */
1718 bfd_elf_generic_reloc, /* special_function */
1719 "R_ARM_RREL32", /* name */
1720 FALSE, /* partial_inplace */
1721 0, /* src_mask */
1722 0, /* dst_mask */
1723 FALSE), /* pcrel_offset */
1724
1725 HOWTO (R_ARM_RABS32, /* type */
1726 0, /* rightshift */
1727 0, /* size (0 = byte, 1 = short, 2 = long) */
1728 0, /* bitsize */
1729 FALSE, /* pc_relative */
1730 0, /* bitpos */
1731 complain_overflow_dont,/* complain_on_overflow */
1732 bfd_elf_generic_reloc, /* special_function */
1733 "R_ARM_RABS32", /* name */
1734 FALSE, /* partial_inplace */
1735 0, /* src_mask */
1736 0, /* dst_mask */
1737 FALSE), /* pcrel_offset */
1738
1739 HOWTO (R_ARM_RPC24, /* type */
1740 0, /* rightshift */
1741 0, /* size (0 = byte, 1 = short, 2 = long) */
1742 0, /* bitsize */
1743 FALSE, /* pc_relative */
1744 0, /* bitpos */
1745 complain_overflow_dont,/* complain_on_overflow */
1746 bfd_elf_generic_reloc, /* special_function */
1747 "R_ARM_RPC24", /* name */
1748 FALSE, /* partial_inplace */
1749 0, /* src_mask */
1750 0, /* dst_mask */
1751 FALSE), /* pcrel_offset */
1752
1753 HOWTO (R_ARM_RBASE, /* type */
1754 0, /* rightshift */
1755 0, /* size (0 = byte, 1 = short, 2 = long) */
1756 0, /* bitsize */
1757 FALSE, /* pc_relative */
1758 0, /* bitpos */
1759 complain_overflow_dont,/* complain_on_overflow */
1760 bfd_elf_generic_reloc, /* special_function */
1761 "R_ARM_RBASE", /* name */
1762 FALSE, /* partial_inplace */
1763 0, /* src_mask */
1764 0, /* dst_mask */
1765 FALSE) /* pcrel_offset */
1766 };
1767
1768 static reloc_howto_type *
1769 elf32_arm_howto_from_type (unsigned int r_type)
1770 {
1771 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1772 return &elf32_arm_howto_table_1[r_type];
1773
1774 if (r_type == R_ARM_IRELATIVE)
1775 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1776
1777 if (r_type >= R_ARM_RREL32
1778 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1779 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1780
1781 return NULL;
1782 }
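
/* A minimal usage sketch, kept out of the build, showing how the three
   howto tables above are selected purely by relocation number; any
   r_type not covered by one of them yields NULL, so callers must check.
   The helper name is illustrative only.  */
#if 0
static const char *
example_reloc_name (unsigned int r_type)
{
  reloc_howto_type *howto = elf32_arm_howto_from_type (r_type);

  return howto != NULL ? howto->name : "<unrecognised>";
}
#endif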
1783
1784 static void
1785 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1786 Elf_Internal_Rela * elf_reloc)
1787 {
1788 unsigned int r_type;
1789
1790 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1791 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1792 }
1793
1794 struct elf32_arm_reloc_map
1795 {
1796 bfd_reloc_code_real_type bfd_reloc_val;
1797 unsigned char elf_reloc_val;
1798 };
1799
1800 /* All entries in this list must also be present in elf32_arm_howto_table. */
1801 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1802 {
1803 {BFD_RELOC_NONE, R_ARM_NONE},
1804 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1805 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1806 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1807 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1808 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1809 {BFD_RELOC_32, R_ARM_ABS32},
1810 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1811 {BFD_RELOC_8, R_ARM_ABS8},
1812 {BFD_RELOC_16, R_ARM_ABS16},
1813 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1814 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1815 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1816 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1817 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1818 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1821 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1822 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1823 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1824 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1825 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1826 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1827 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1828 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1829 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1830 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1831 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1832 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1833 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1834 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1835 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1836 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1837 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1838 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1839 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1840 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1841 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1842 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1843 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1844 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1845 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1846 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1847 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1848 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1849 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1850 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1851 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1852 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1853 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1854 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1855 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1856 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1857 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1858 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1859 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1860 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1861 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1862 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1863 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1864 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1865 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1866 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1867 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1868 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1869 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1870 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1871 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1872 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1873 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1874 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1875 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1876 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1877 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1878 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1879 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1880 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1881 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1882 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1883 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1884 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1885 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1886 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1887 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1888 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1889 };
1890
1891 static reloc_howto_type *
1892 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1893 bfd_reloc_code_real_type code)
1894 {
1895 unsigned int i;
1896
1897 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1898 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1899 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1900
1901 return NULL;
1902 }
1903
1904 static reloc_howto_type *
1905 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1906 const char *r_name)
1907 {
1908 unsigned int i;
1909
1910 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1911 if (elf32_arm_howto_table_1[i].name != NULL
1912 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1913 return &elf32_arm_howto_table_1[i];
1914
1915 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1916 if (elf32_arm_howto_table_2[i].name != NULL
1917 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1918 return &elf32_arm_howto_table_2[i];
1919
1920 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1921 if (elf32_arm_howto_table_3[i].name != NULL
1922 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1923 return &elf32_arm_howto_table_3[i];
1924
1925 return NULL;
1926 }
1927
1928 /* Support for core dump NOTE sections. */
1929
1930 static bfd_boolean
1931 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1932 {
1933 int offset;
1934 size_t size;
1935
1936 switch (note->descsz)
1937 {
1938 default:
1939 return FALSE;
1940
1941 case 148: /* Linux/ARM 32-bit. */
1942 /* pr_cursig */
1943 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1944
1945 /* pr_pid */
1946 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1947
1948 /* pr_reg */
1949 offset = 72;
1950 size = 72;
1951
1952 break;
1953 }
1954
1955 /* Make a ".reg/999" section. */
1956 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1957 size, note->descpos + offset);
1958 }
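
/* An illustrative consistency check, kept out of the build, for the
   magic numbers above: on Linux/ARM the pr_reg field holds 18 32-bit
   values (r0-r15, cpsr, orig_r0), and the trailing 4-byte pr_fpvalid
   field accounts for the rest of the 148-byte note.  The macro names
   are invented for the sketch.  */
#if 0
#define EXAMPLE_ARM_PR_REG_OFFSET 72
#define EXAMPLE_ARM_NUM_GREGS     18

static void
example_check_prstatus_layout (void)
{
  BFD_ASSERT (EXAMPLE_ARM_NUM_GREGS * 4 == 72);
  BFD_ASSERT (EXAMPLE_ARM_PR_REG_OFFSET + EXAMPLE_ARM_NUM_GREGS * 4 + 4 == 148);
}
#endif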
1959
1960 static bfd_boolean
1961 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1962 {
1963 switch (note->descsz)
1964 {
1965 default:
1966 return FALSE;
1967
1968 case 124: /* Linux/ARM elf_prpsinfo. */
1969 elf_tdata (abfd)->core_program
1970 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1971 elf_tdata (abfd)->core_command
1972 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1973 }
1974
1975 /* Note that some implementations (at least one, anyway) tack a
1976 spurious space onto the end of the args, so strip it off if it is
1977 present. */
1978 {
1979 char *command = elf_tdata (abfd)->core_command;
1980 int n = strlen (command);
1981
1982 if (0 < n && command[n - 1] == ' ')
1983 command[n - 1] = '\0';
1984 }
1985
1986 return TRUE;
1987 }
1988
1989 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1990 #define TARGET_LITTLE_NAME "elf32-littlearm"
1991 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1992 #define TARGET_BIG_NAME "elf32-bigarm"
1993
1994 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1995 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1996
1997 typedef unsigned long int insn32;
1998 typedef unsigned short int insn16;
1999
2000 /* In lieu of proper flags, assume all EABIv4 or later objects are
2001 interworkable. */
2002 #define INTERWORK_FLAG(abfd) \
2003 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2004 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2005 || ((abfd)->flags & BFD_LINKER_CREATED))
2006
2007 /* The linker script knows the section names for placement.
2008 The entry_names are used to do simple name mangling on the stubs.
2009 Given a function name, and its type, the stub can be found. The
2010 name can be changed. The only requirement is that the %s be present. */
2011 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2012 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2013
2014 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2015 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2016
2017 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2018 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2019
2020 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2021 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2022
2023 #define STUB_ENTRY_NAME "__%s_veneer"
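
/* A hedged sketch, kept out of the build, of the simple "%s" mangling
   described above.  The fixed-size buffers and the example results are
   illustrative; the real code is expected to size its buffers from the
   symbol name instead.  */
#if 0
#include <stdio.h>

static void
example_mangle_glue_names (const char *func_name)
{
  char a2t[256], t2a[256], veneer[256];

  /* For func_name "foo" these give "__foo_from_arm", "__foo_from_thumb"
     and "__foo_veneer" respectively.  */
  snprintf (a2t, sizeof a2t, ARM2THUMB_GLUE_ENTRY_NAME, func_name);
  snprintf (t2a, sizeof t2a, THUMB2ARM_GLUE_ENTRY_NAME, func_name);
  snprintf (veneer, sizeof veneer, STUB_ENTRY_NAME, func_name);
  (void) a2t; (void) t2a; (void) veneer;
}
#endif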
2024
2025 /* The name of the dynamic interpreter. This is put in the .interp
2026 section. */
2027 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2028
2029 static const unsigned long tls_trampoline [] =
2030 {
2031 0xe08e0000, /* add r0, lr, r0 */
2032 0xe5901004, /* ldr r1, [r0,#4] */
2033 0xe12fff11, /* bx r1 */
2034 };
2035
2036 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2037 {
2038 0xe52d2004, /* push {r2} */
2039 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2040 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2041 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2042 0xe081100f, /* 2: add r1, pc */
2043 0xe12fff12, /* bx r2 */
2044 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2045 + dl_tlsdesc_lazy_resolver(GOT) */
2046 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2047 };
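
/* A small sketch, kept out of the build, of the ARM-state PC-relative
   rule behind the "- 8" terms in the .word expressions above: an
   instruction at address A that reads the pc sees A + 8, so a datum at
   D is reached from A with the displacement D - A - 8.  */
#if 0
static bfd_vma
example_arm_pcrel_displacement (bfd_vma insn_address, bfd_vma datum_address)
{
  /* Displacement to encode so that a pc-relative load or add at
     INSN_ADDRESS reaches DATUM_ADDRESS.  */
  return datum_address - (insn_address + 8);
}
#endif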
2048
2049 #ifdef FOUR_WORD_PLT
2050
2051 /* The first entry in a procedure linkage table looks like
2052 this. It is set up so that any shared library function that is
2053 called before the relocation has been set up calls the dynamic
2054 linker first. */
2055 static const bfd_vma elf32_arm_plt0_entry [] =
2056 {
2057 0xe52de004, /* str lr, [sp, #-4]! */
2058 0xe59fe010, /* ldr lr, [pc, #16] */
2059 0xe08fe00e, /* add lr, pc, lr */
2060 0xe5bef008, /* ldr pc, [lr, #8]! */
2061 };
2062
2063 /* Subsequent entries in a procedure linkage table look like
2064 this. */
2065 static const bfd_vma elf32_arm_plt_entry [] =
2066 {
2067 0xe28fc600, /* add ip, pc, #NN */
2068 0xe28cca00, /* add ip, ip, #NN */
2069 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2070 0x00000000, /* unused */
2071 };
2072
2073 #else
2074
2075 /* The first entry in a procedure linkage table looks like
2076 this. It is set up so that any shared library function that is
2077 called before the relocation has been set up calls the dynamic
2078 linker first. */
2079 static const bfd_vma elf32_arm_plt0_entry [] =
2080 {
2081 0xe52de004, /* str lr, [sp, #-4]! */
2082 0xe59fe004, /* ldr lr, [pc, #4] */
2083 0xe08fe00e, /* add lr, pc, lr */
2084 0xe5bef008, /* ldr pc, [lr, #8]! */
2085 0x00000000, /* &GOT[0] - . */
2086 };
2087
2088 /* Subsequent entries in a procedure linkage table look like
2089 this. */
2090 static const bfd_vma elf32_arm_plt_entry [] =
2091 {
2092 0xe28fc600, /* add ip, pc, #0xNN00000 */
2093 0xe28cca00, /* add ip, ip, #0xNN000 */
2094 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2095 };
2096
2097 #endif
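
/* A hedged sketch, kept out of the build, of how a GOT displacement is
   expected to be folded into the three-word template just above (the
   non-FOUR_WORD_PLT case): bits [27:20] go into the rotated immediate of
   the first add, bits [19:12] into the second, and the low 12 bits into
   the ldr offset.  The helper only combines the words; it does not
   write them anywhere.  */
#if 0
static void
example_fill_plt_entry (bfd_vma got_displacement, bfd_vma insns[3])
{
  insns[0] = elf32_arm_plt_entry[0] | ((got_displacement & 0x0ff00000) >> 20);
  insns[1] = elf32_arm_plt_entry[1] | ((got_displacement & 0x000ff000) >> 12);
  insns[2] = elf32_arm_plt_entry[2] |  (got_displacement & 0x00000fff);
}
#endif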
2098
2099 /* The format of the first entry in the procedure linkage table
2100 for a VxWorks executable. */
2101 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2102 {
2103 0xe52dc008, /* str ip,[sp,#-8]! */
2104 0xe59fc000, /* ldr ip,[pc] */
2105 0xe59cf008, /* ldr pc,[ip,#8] */
2106 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2107 };
2108
2109 /* The format of subsequent entries in a VxWorks executable. */
2110 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2111 {
2112 0xe59fc000, /* ldr ip,[pc] */
2113 0xe59cf000, /* ldr pc,[ip] */
2114 0x00000000, /* .long @got */
2115 0xe59fc000, /* ldr ip,[pc] */
2116 0xea000000, /* b _PLT */
2117 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2118 };
2119
2120 /* The format of entries in a VxWorks shared library. */
2121 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2122 {
2123 0xe59fc000, /* ldr ip,[pc] */
2124 0xe79cf009, /* ldr pc,[ip,r9] */
2125 0x00000000, /* .long @got */
2126 0xe59fc000, /* ldr ip,[pc] */
2127 0xe599f008, /* ldr pc,[r9,#8] */
2128 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2129 };
2130
2131 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2132 #define PLT_THUMB_STUB_SIZE 4
2133 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2134 {
2135 0x4778, /* bx pc */
2136 0x46c0 /* nop */
2137 };
2138
2139 /* The entries in a PLT when using a DLL-based target with multiple
2140 address spaces. */
2141 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2142 {
2143 0xe51ff004, /* ldr pc, [pc, #-4] */
2144 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2145 };
2146
2147 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2148 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2149 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2150 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2151 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2152 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
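
/* A minimal sketch, kept out of the build, of how the range limits above
   are meant to be used: a branch whose offset falls outside the relevant
   window is what forces one of the long-branch stubs defined below.  */
#if 0
static bfd_boolean
example_thumb2_branch_in_range (bfd_signed_vma branch_offset)
{
  return (branch_offset <= THM2_MAX_FWD_BRANCH_OFFSET
	  && branch_offset >= THM2_MAX_BWD_BRANCH_OFFSET);
}
#endif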
2153
2154 enum stub_insn_type
2155 {
2156 THUMB16_TYPE = 1,
2157 THUMB32_TYPE,
2158 ARM_TYPE,
2159 DATA_TYPE
2160 };
2161
2162 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2163 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2164 is inserted in arm_build_one_stub(). */
2165 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2166 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2167 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2168 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2169 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2170 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2171
2172 typedef struct
2173 {
2174 bfd_vma data;
2175 enum stub_insn_type type;
2176 unsigned int r_type;
2177 int reloc_addend;
2178 } insn_sequence;
2179
2180 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2181 to reach the stub if necessary. */
2182 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2183 {
2184 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2185 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2186 };
2187
2188 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2189 available. */
2190 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2191 {
2192 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2193 ARM_INSN(0xe12fff1c), /* bx ip */
2194 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2195 };
2196
2197 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2198 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2199 {
2200 THUMB16_INSN(0xb401), /* push {r0} */
2201 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2202 THUMB16_INSN(0x4684), /* mov ip, r0 */
2203 THUMB16_INSN(0xbc01), /* pop {r0} */
2204 THUMB16_INSN(0x4760), /* bx ip */
2205 THUMB16_INSN(0xbf00), /* nop */
2206 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2207 };
2208
2209 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2210 allowed. */
2211 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2212 {
2213 THUMB16_INSN(0x4778), /* bx pc */
2214 THUMB16_INSN(0x46c0), /* nop */
2215 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2216 ARM_INSN(0xe12fff1c), /* bx ip */
2217 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2218 };
2219
2220 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2221 available. */
2222 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2223 {
2224 THUMB16_INSN(0x4778), /* bx pc */
2225 THUMB16_INSN(0x46c0), /* nop */
2226 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2227 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2228 };
2229
2230 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2231 one, when the destination is close enough. */
2232 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2233 {
2234 THUMB16_INSN(0x4778), /* bx pc */
2235 THUMB16_INSN(0x46c0), /* nop */
2236 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2237 };
2238
2239 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2240 blx to reach the stub if necessary. */
2241 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2242 {
2243 ARM_INSN(0xe59fc000), /* ldr ip, [pc] */
2244 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2245 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2246 };
2247
2248 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2249 blx to reach the stub if necessary. We cannot add directly into pc,
2250 because that is not guaranteed to switch mode (the behaviour differs
2251 between ARMv6 and ARMv7). */
2252 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2253 {
2254 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2255 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2256 ARM_INSN(0xe12fff1c), /* bx ip */
2257 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2258 };
2259
2260 /* V4T ARM -> Thumb long branch stub, PIC. */
2261 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2262 {
2263 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2264 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2265 ARM_INSN(0xe12fff1c), /* bx ip */
2266 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2267 };
2268
2269 /* V4T Thumb -> ARM long branch stub, PIC. */
2270 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2271 {
2272 THUMB16_INSN(0x4778), /* bx pc */
2273 THUMB16_INSN(0x46c0), /* nop */
2274 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2275 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2276 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2277 };
2278
2279 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2280 architectures. */
2281 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2282 {
2283 THUMB16_INSN(0xb401), /* push {r0} */
2284 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2285 THUMB16_INSN(0x46fc), /* mov ip, pc */
2286 THUMB16_INSN(0x4484), /* add ip, r0 */
2287 THUMB16_INSN(0xbc01), /* pop {r0} */
2288 THUMB16_INSN(0x4760), /* bx ip */
2289 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2290 };
2291
2292 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2293 allowed. */
2294 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2295 {
2296 THUMB16_INSN(0x4778), /* bx pc */
2297 THUMB16_INSN(0x46c0), /* nop */
2298 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2299 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2300 ARM_INSN(0xe12fff1c), /* bx ip */
2301 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2302 };
2303
2304 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2305 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2306 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2307 {
2308 ARM_INSN(0xe59f1000), /* ldr r1, [pc] */
2309 ARM_INSN(0xe08ff001), /* add pc, pc, r1 */
2310 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2311 };
2312
2313 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2314 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2315 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2316 {
2317 THUMB16_INSN(0x4778), /* bx pc */
2318 THUMB16_INSN(0x46c0), /* nop */
2319 ARM_INSN(0xe59f1000), /* ldr r1, [pc, #0] */
2320 ARM_INSN(0xe081f00f), /* add pc, r1, pc */
2321 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2322 };
2323
2324 /* Cortex-A8 erratum-workaround stubs. */
2325
2326 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2327 can't use a conditional branch to reach this stub). */
2328
2329 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2330 {
2331 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2332 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2333 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2334 };
2335
2336 /* Stub used for b.w and bl.w instructions. */
2337
2338 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2339 {
2340 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2341 };
2342
2343 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2344 {
2345 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2346 };
2347
2348 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2349 instruction (which switches to ARM mode) to point to this stub. Jump to the
2350 real destination using an ARM-mode branch. */
2351
2352 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2353 {
2354 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2355 };
2356
2357 /* Section name for stubs is the associated section name plus this
2358 string. */
2359 #define STUB_SUFFIX ".stub"
2360
2361 /* One entry per long/short branch stub defined above. */
2362 #define DEF_STUBS \
2363 DEF_STUB(long_branch_any_any) \
2364 DEF_STUB(long_branch_v4t_arm_thumb) \
2365 DEF_STUB(long_branch_thumb_only) \
2366 DEF_STUB(long_branch_v4t_thumb_thumb) \
2367 DEF_STUB(long_branch_v4t_thumb_arm) \
2368 DEF_STUB(short_branch_v4t_thumb_arm) \
2369 DEF_STUB(long_branch_any_arm_pic) \
2370 DEF_STUB(long_branch_any_thumb_pic) \
2371 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2372 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2373 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2374 DEF_STUB(long_branch_thumb_only_pic) \
2375 DEF_STUB(long_branch_any_tls_pic) \
2376 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2377 DEF_STUB(a8_veneer_b_cond) \
2378 DEF_STUB(a8_veneer_b) \
2379 DEF_STUB(a8_veneer_bl) \
2380 DEF_STUB(a8_veneer_blx)
2381
2382 #define DEF_STUB(x) arm_stub_##x,
2383 enum elf32_arm_stub_type {
2384 arm_stub_none,
2385 DEF_STUBS
2386 /* Note the first a8_veneer type */
2387 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2388 };
2389 #undef DEF_STUB
2390
2391 typedef struct
2392 {
2393 const insn_sequence* template_sequence;
2394 int template_size;
2395 } stub_def;
2396
2397 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2398 static const stub_def stub_definitions[] = {
2399 {NULL, 0},
2400 DEF_STUBS
2401 };
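
/* A standalone sketch, kept out of the build, of the X-macro idiom used
   above: DEF_STUBS is expanded twice with different definitions of
   DEF_STUB so that the enum and stub_definitions stay in sync, and
   stub_definitions[stub_type] is always the matching template (the
   leading {NULL, 0} pairs with arm_stub_none).  All names in the sketch
   are invented for illustration.  */
#if 0
#define EXAMPLE_COLOURS \
  EXAMPLE_COLOUR (red)   \
  EXAMPLE_COLOUR (green) \
  EXAMPLE_COLOUR (blue)

#define EXAMPLE_COLOUR(x) example_colour_##x,
enum example_colour { example_colour_none, EXAMPLE_COLOURS example_colour_count };
#undef EXAMPLE_COLOUR

#define EXAMPLE_COLOUR(x) #x,
static const char *const example_colour_names[] = { "none", EXAMPLE_COLOURS };
#undef EXAMPLE_COLOUR
#endif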
2402
2403 struct elf32_arm_stub_hash_entry
2404 {
2405 /* Base hash table entry structure. */
2406 struct bfd_hash_entry root;
2407
2408 /* The stub section. */
2409 asection *stub_sec;
2410
2411 /* Offset within stub_sec of the beginning of this stub. */
2412 bfd_vma stub_offset;
2413
2414 /* Given the symbol's value and its section we can determine its final
2415 value when building the stubs (so the stub knows where to jump). */
2416 bfd_vma target_value;
2417 asection *target_section;
2418
2419 /* Offset to apply to relocation referencing target_value. */
2420 bfd_vma target_addend;
2421
2422 /* The instruction which caused this stub to be generated (only valid for
2423 Cortex-A8 erratum workaround stubs at present). */
2424 unsigned long orig_insn;
2425
2426 /* The stub type. */
2427 enum elf32_arm_stub_type stub_type;
2428 /* Its encoding size in bytes. */
2429 int stub_size;
2430 /* Its template. */
2431 const insn_sequence *stub_template;
2432 /* The size of the template (number of entries). */
2433 int stub_template_size;
2434
2435 /* The symbol table entry, if any, that this was derived from. */
2436 struct elf32_arm_link_hash_entry *h;
2437
2438 /* Type of branch. */
2439 enum arm_st_branch_type branch_type;
2440
2441 /* Where this stub is being called from, or, in the case of combined
2442 stub sections, the first input section in the group. */
2443 asection *id_sec;
2444
2445 /* The name for the local symbol at the start of this stub. The
2446 stub name in the hash table has to be unique; this does not, so
2447 it can be friendlier. */
2448 char *output_name;
2449 };
2450
2451 /* Used to build a map of a section. This is required for mixed-endian
2452 code/data. */
2453
2454 typedef struct elf32_elf_section_map
2455 {
2456 bfd_vma vma;
2457 char type;
2458 }
2459 elf32_arm_section_map;
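
/* A hedged sketch, kept out of the build, of how such a map is consumed.
   The entries come from the ARM EABI mapping symbols ($a for ARM code,
   $t for Thumb code, $d for data); with the map sorted by vma, the state
   in effect at an address is that of the last entry at or below it.  The
   linear scan and the 'a' default are for illustration only.  */
#if 0
static char
example_state_at (const elf32_arm_section_map *map, unsigned int mapcount,
		  bfd_vma vma)
{
  char state = 'a';
  unsigned int i;

  for (i = 0; i < mapcount; i++)
    {
      if (map[i].vma > vma)
	break;
      state = map[i].type;
    }
  return state;
}
#endif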
2460
2461 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2462
2463 typedef enum
2464 {
2465 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2466 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2467 VFP11_ERRATUM_ARM_VENEER,
2468 VFP11_ERRATUM_THUMB_VENEER
2469 }
2470 elf32_vfp11_erratum_type;
2471
2472 typedef struct elf32_vfp11_erratum_list
2473 {
2474 struct elf32_vfp11_erratum_list *next;
2475 bfd_vma vma;
2476 union
2477 {
2478 struct
2479 {
2480 struct elf32_vfp11_erratum_list *veneer;
2481 unsigned int vfp_insn;
2482 } b;
2483 struct
2484 {
2485 struct elf32_vfp11_erratum_list *branch;
2486 unsigned int id;
2487 } v;
2488 } u;
2489 elf32_vfp11_erratum_type type;
2490 }
2491 elf32_vfp11_erratum_list;
2492
2493 typedef enum
2494 {
2495 DELETE_EXIDX_ENTRY,
2496 INSERT_EXIDX_CANTUNWIND_AT_END
2497 }
2498 arm_unwind_edit_type;
2499
2500 /* A (sorted) list of edits to apply to an unwind table. */
2501 typedef struct arm_unwind_table_edit
2502 {
2503 arm_unwind_edit_type type;
2504 /* Note: we sometimes want to insert an unwind entry corresponding to a
2505 section different from the one we're currently writing out, so record the
2506 (text) section this edit relates to here. */
2507 asection *linked_section;
2508 unsigned int index;
2509 struct arm_unwind_table_edit *next;
2510 }
2511 arm_unwind_table_edit;
2512
2513 typedef struct _arm_elf_section_data
2514 {
2515 /* Information about mapping symbols. */
2516 struct bfd_elf_section_data elf;
2517 unsigned int mapcount;
2518 unsigned int mapsize;
2519 elf32_arm_section_map *map;
2520 /* Information about CPU errata. */
2521 unsigned int erratumcount;
2522 elf32_vfp11_erratum_list *erratumlist;
2523 /* Information about unwind tables. */
2524 union
2525 {
2526 /* Unwind info attached to a text section. */
2527 struct
2528 {
2529 asection *arm_exidx_sec;
2530 } text;
2531
2532 /* Unwind info attached to an .ARM.exidx section. */
2533 struct
2534 {
2535 arm_unwind_table_edit *unwind_edit_list;
2536 arm_unwind_table_edit *unwind_edit_tail;
2537 } exidx;
2538 } u;
2539 }
2540 _arm_elf_section_data;
2541
2542 #define elf32_arm_section_data(sec) \
2543 ((_arm_elf_section_data *) elf_section_data (sec))
2544
2545 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2546 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2547 so they may be created multiple times: whilst relaxing we keep them in an
2548 array, which is easy to refresh, and then create stubs for each potentially
2549 erratum-triggering instruction once we've settled on a solution. */
2550
2551 struct a8_erratum_fix {
2552 bfd *input_bfd;
2553 asection *section;
2554 bfd_vma offset;
2555 bfd_vma addend;
2556 unsigned long orig_insn;
2557 char *stub_name;
2558 enum elf32_arm_stub_type stub_type;
2559 enum arm_st_branch_type branch_type;
2560 };
2561
2562 /* A table of relocs applied to branches which might trigger Cortex-A8
2563 erratum. */
2564
2565 struct a8_erratum_reloc {
2566 bfd_vma from;
2567 bfd_vma destination;
2568 struct elf32_arm_link_hash_entry *hash;
2569 const char *sym_name;
2570 unsigned int r_type;
2571 enum arm_st_branch_type branch_type;
2572 bfd_boolean non_a8_stub;
2573 };
2574
2575 /* The size of the thread control block. */
2576 #define TCB_SIZE 8
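
/* A hedged sketch, kept out of the build, of the arithmetic TCB_SIZE
   feeds into for ARM's "variant 1" TLS layout: the TLS block starts just
   after the thread control block, rounded up to the block's alignment,
   so a local-exec offset from the thread pointer is the symbol's offset
   into the TLS segment plus that rounded TCB size.  The names are
   illustrative and the alignment is assumed to be a power of two.  */
#if 0
static bfd_vma
example_tpoff (bfd_vma sym_address, bfd_vma tls_segment_vma,
	       bfd_vma tls_segment_alignment)
{
  bfd_vma base = TCB_SIZE;

  if (tls_segment_alignment > 1)
    base = (base + tls_segment_alignment - 1) & ~(tls_segment_alignment - 1);

  return sym_address - tls_segment_vma + base;
}
#endif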
2577
2578 /* ARM-specific information about a PLT entry, over and above the usual
2579 gotplt_union. */
2580 struct arm_plt_info {
2581 /* We reference count Thumb references to a PLT entry separately,
2582 so that we can emit the Thumb trampoline only if needed. */
2583 bfd_signed_vma thumb_refcount;
2584
2585 /* Some references from Thumb code may be eliminated by BL->BLX
2586 conversion, so record them separately. */
2587 bfd_signed_vma maybe_thumb_refcount;
2588
2589 /* How many of the recorded PLT accesses were from non-call relocations.
2590 This information is useful when deciding whether anything takes the
2591 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2592 non-call references to the function should resolve directly to the
2593 real runtime target. */
2594 unsigned int noncall_refcount;
2595
2596 /* Since PLT entries have variable size if the Thumb prologue is
2597 used, we need to record the index into .got.plt instead of
2598 recomputing it from the PLT offset. */
2599 bfd_signed_vma got_offset;
2600 };
2601
2602 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2603 struct arm_local_iplt_info {
2604 /* The information that is usually found in the generic ELF part of
2605 the hash table entry. */
2606 union gotplt_union root;
2607
2608 /* The information that is usually found in the ARM-specific part of
2609 the hash table entry. */
2610 struct arm_plt_info arm;
2611
2612 /* A list of all potential dynamic relocations against this symbol. */
2613 struct elf_dyn_relocs *dyn_relocs;
2614 };
2615
2616 struct elf_arm_obj_tdata
2617 {
2618 struct elf_obj_tdata root;
2619
2620 /* tls_type for each local got entry. */
2621 char *local_got_tls_type;
2622
2623 /* GOTPLT entries for TLS descriptors. */
2624 bfd_vma *local_tlsdesc_gotent;
2625
2626 /* Information for local symbols that need entries in .iplt. */
2627 struct arm_local_iplt_info **local_iplt;
2628
2629 /* Zero to warn when linking objects with incompatible enum sizes. */
2630 int no_enum_size_warning;
2631
2632 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2633 int no_wchar_size_warning;
2634 };
2635
2636 #define elf_arm_tdata(bfd) \
2637 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2638
2639 #define elf32_arm_local_got_tls_type(bfd) \
2640 (elf_arm_tdata (bfd)->local_got_tls_type)
2641
2642 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2643 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2644
2645 #define elf32_arm_local_iplt(bfd) \
2646 (elf_arm_tdata (bfd)->local_iplt)
2647
2648 #define is_arm_elf(bfd) \
2649 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2650 && elf_tdata (bfd) != NULL \
2651 && elf_object_id (bfd) == ARM_ELF_DATA)
2652
2653 static bfd_boolean
2654 elf32_arm_mkobject (bfd *abfd)
2655 {
2656 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2657 ARM_ELF_DATA);
2658 }
2659
2660 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2661
2662 /* Arm ELF linker hash entry. */
2663 struct elf32_arm_link_hash_entry
2664 {
2665 struct elf_link_hash_entry root;
2666
2667 /* Track dynamic relocs copied for this symbol. */
2668 struct elf_dyn_relocs *dyn_relocs;
2669
2670 /* ARM-specific PLT information. */
2671 struct arm_plt_info plt;
2672
2673 #define GOT_UNKNOWN 0
2674 #define GOT_NORMAL 1
2675 #define GOT_TLS_GD 2
2676 #define GOT_TLS_IE 4
2677 #define GOT_TLS_GDESC 8
2678 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2679 unsigned int tls_type : 8;
2680
2681 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2682 unsigned int is_iplt : 1;
2683
2684 unsigned int unused : 23;
2685
2686 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2687 starting at the end of the jump table. */
2688 bfd_vma tlsdesc_got;
2689
2690 /* The symbol marking the real symbol location for exported thumb
2691 symbols with Arm stubs. */
2692 struct elf_link_hash_entry *export_glue;
2693
2694 /* A pointer to the most recently used stub hash entry against this
2695 symbol. */
2696 struct elf32_arm_stub_hash_entry *stub_cache;
2697 };
2698
2699 /* Traverse an arm ELF linker hash table. */
2700 #define elf32_arm_link_hash_traverse(table, func, info) \
2701 (elf_link_hash_traverse \
2702 (&(table)->root, \
2703 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2704 (info)))
2705
2706 /* Get the ARM elf linker hash table from a link_info structure. */
2707 #define elf32_arm_hash_table(info) \
2708 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2709 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2710
2711 #define arm_stub_hash_lookup(table, string, create, copy) \
2712 ((struct elf32_arm_stub_hash_entry *) \
2713 bfd_hash_lookup ((table), (string), (create), (copy)))
2714
2715 /* Array to keep track of which stub sections have been created, and
2716 information on stub grouping. */
2717 struct map_stub
2718 {
2719 /* This is the section to which stubs in the group will be
2720 attached. */
2721 asection *link_sec;
2722 /* The stub section. */
2723 asection *stub_sec;
2724 };
2725
2726 #define elf32_arm_compute_jump_table_size(htab) \
2727 ((htab)->next_tls_desc_index * 4)
2728
2729 /* ARM ELF linker hash table. */
2730 struct elf32_arm_link_hash_table
2731 {
2732 /* The main hash table. */
2733 struct elf_link_hash_table root;
2734
2735 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2736 bfd_size_type thumb_glue_size;
2737
2738 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2739 bfd_size_type arm_glue_size;
2740
2741 /* The size in bytes of section containing the ARMv4 BX veneers. */
2742 bfd_size_type bx_glue_size;
2743
2744 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2745 veneer has been populated. */
2746 bfd_vma bx_glue_offset[15];
2747
2748 /* The size in bytes of the section containing glue for VFP11 erratum
2749 veneers. */
2750 bfd_size_type vfp11_erratum_glue_size;
2751
2752 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2753 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2754 elf32_arm_write_section(). */
2755 struct a8_erratum_fix *a8_erratum_fixes;
2756 unsigned int num_a8_erratum_fixes;
2757
2758 /* An arbitrary input BFD chosen to hold the glue sections. */
2759 bfd * bfd_of_glue_owner;
2760
2761 /* Nonzero to output a BE8 image. */
2762 int byteswap_code;
2763
2764 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2765 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2766 int target1_is_rel;
2767
2768 /* The relocation to use for R_ARM_TARGET2 relocations. */
2769 int target2_reloc;
2770
2771 /* 0 = Ignore R_ARM_V4BX.
2772 1 = Convert BX to MOV PC.
2773 2 = Generate v4 interworking stubs. */
2774 int fix_v4bx;
2775
2776 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2777 int fix_cortex_a8;
2778
2779 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2780 int use_blx;
2781
2782 /* What sort of code sequences we should look for which may trigger the
2783 VFP11 denorm erratum. */
2784 bfd_arm_vfp11_fix vfp11_fix;
2785
2786 /* Global counter for the number of fixes we have emitted. */
2787 int num_vfp11_fixes;
2788
2789 /* Nonzero to force PIC branch veneers. */
2790 int pic_veneer;
2791
2792 /* The number of bytes in the initial entry in the PLT. */
2793 bfd_size_type plt_header_size;
2794
2795 /* The number of bytes in the subsequent PLT entries. */
2796 bfd_size_type plt_entry_size;
2797
2798 /* True if the target system is VxWorks. */
2799 int vxworks_p;
2800
2801 /* True if the target system is Symbian OS. */
2802 int symbian_p;
2803
2804 /* True if the target uses REL relocations. */
2805 int use_rel;
2806
2807 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2808 bfd_vma next_tls_desc_index;
2809
2810 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2811 bfd_vma num_tls_desc;
2812
2813 /* Short-cuts to get to dynamic linker sections. */
2814 asection *sdynbss;
2815 asection *srelbss;
2816
2817 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2818 asection *srelplt2;
2819
2820 /* The offset into splt of the PLT entry for the TLS descriptor
2821 resolver. Special values are 0, if not necessary (or not found
2822 to be necessary yet), and -1 if needed but not determined
2823 yet. */
2824 bfd_vma dt_tlsdesc_plt;
2825
2826 /* The offset into sgot of the GOT entry used by the PLT entry
2827 above. */
2828 bfd_vma dt_tlsdesc_got;
2829
2830 /* Offset in .plt section of tls_arm_trampoline. */
2831 bfd_vma tls_trampoline;
2832
2833 /* Data for R_ARM_TLS_LDM32 relocations. */
2834 union
2835 {
2836 bfd_signed_vma refcount;
2837 bfd_vma offset;
2838 } tls_ldm_got;
2839
2840 /* Small local sym cache. */
2841 struct sym_cache sym_cache;
2842
2843 /* For convenience in allocate_dynrelocs. */
2844 bfd * obfd;
2845
2846 /* The amount of space used by the reserved portion of the sgotplt
2847 section, plus whatever space is used by the jump slots. */
2848 bfd_vma sgotplt_jump_table_size;
2849
2850 /* The stub hash table. */
2851 struct bfd_hash_table stub_hash_table;
2852
2853 /* Linker stub bfd. */
2854 bfd *stub_bfd;
2855
2856 /* Linker call-backs. */
2857 asection * (*add_stub_section) (const char *, asection *);
2858 void (*layout_sections_again) (void);
2859
2860 /* Array to keep track of which stub sections have been created, and
2861 information on stub grouping. */
2862 struct map_stub *stub_group;
2863
2864 /* Number of elements in stub_group. */
2865 int top_id;
2866
2867 /* Assorted information used by elf32_arm_size_stubs. */
2868 unsigned int bfd_count;
2869 int top_index;
2870 asection **input_list;
2871 };
2872
2873 /* Create an entry in an ARM ELF linker hash table. */
2874
2875 static struct bfd_hash_entry *
2876 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2877 struct bfd_hash_table * table,
2878 const char * string)
2879 {
2880 struct elf32_arm_link_hash_entry * ret =
2881 (struct elf32_arm_link_hash_entry *) entry;
2882
2883 /* Allocate the structure if it has not already been allocated by a
2884 subclass. */
2885 if (ret == NULL)
2886 ret = (struct elf32_arm_link_hash_entry *)
2887 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2888 if (ret == NULL)
2889 return (struct bfd_hash_entry *) ret;
2890
2891 /* Call the allocation method of the superclass. */
2892 ret = ((struct elf32_arm_link_hash_entry *)
2893 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2894 table, string));
2895 if (ret != NULL)
2896 {
2897 ret->dyn_relocs = NULL;
2898 ret->tls_type = GOT_UNKNOWN;
2899 ret->tlsdesc_got = (bfd_vma) -1;
2900 ret->plt.thumb_refcount = 0;
2901 ret->plt.maybe_thumb_refcount = 0;
2902 ret->plt.noncall_refcount = 0;
2903 ret->plt.got_offset = -1;
2904 ret->is_iplt = FALSE;
2905 ret->export_glue = NULL;
2906
2907 ret->stub_cache = NULL;
2908 }
2909
2910 return (struct bfd_hash_entry *) ret;
2911 }
2912
2913 /* Ensure that we have allocated bookkeeping structures for ABFD's local
2914 symbols. */
2915
2916 static bfd_boolean
2917 elf32_arm_allocate_local_sym_info (bfd *abfd)
2918 {
2919 if (elf_local_got_refcounts (abfd) == NULL)
2920 {
2921 bfd_size_type num_syms;
2922 bfd_size_type size;
2923 char *data;
2924
2925 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
2926 size = num_syms * (sizeof (bfd_signed_vma)
2927 + sizeof (struct arm_local_iplt_info *)
2928 + sizeof (bfd_vma)
2929 + sizeof (char));
2930 data = bfd_zalloc (abfd, size);
2931 if (data == NULL)
2932 return FALSE;
2933
2934 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
2935 data += num_syms * sizeof (bfd_signed_vma);
2936
2937 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
2938 data += num_syms * sizeof (struct arm_local_iplt_info *);
2939
2940 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
2941 data += num_syms * sizeof (bfd_vma);
2942
2943 elf32_arm_local_got_tls_type (abfd) = data;
2944 }
2945 return TRUE;
2946 }
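
/* A standalone sketch, kept out of the build, of the allocation pattern
   used above: one zero-filled block is carved into several parallel
   per-symbol arrays, so a single allocation (and a single failure check)
   covers them all.  Sub-arrays are laid out from strictest to weakest
   alignment.  The element types here are generic placeholders.  */
#if 0
#include <stdlib.h>

static int
example_parallel_arrays (size_t num_syms,
			 long **refcounts, void ***infos, char **flags)
{
  char *data = calloc (num_syms,
		       sizeof (long) + sizeof (void *) + sizeof (char));

  if (data == NULL)
    return 0;

  *refcounts = (long *) data;
  data += num_syms * sizeof (long);

  *infos = (void **) data;
  data += num_syms * sizeof (void *);

  *flags = data;
  return 1;
}
#endif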
2947
2948 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
2949 to input bfd ABFD. Create the information if it doesn't already exist.
2950 Return null if an allocation fails. */
2951
2952 static struct arm_local_iplt_info *
2953 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
2954 {
2955 struct arm_local_iplt_info **ptr;
2956
2957 if (!elf32_arm_allocate_local_sym_info (abfd))
2958 return NULL;
2959
2960 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
2961 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
2962 if (*ptr == NULL)
2963 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
2964 return *ptr;
2965 }
2966
2967 /* Try to obtain PLT information for the symbol with index R_SYMNDX
2968 in ABFD's symbol table. If the symbol is global, H points to its
2969 hash table entry, otherwise H is null.
2970
2971 Return true if the symbol does have PLT information. When returning
2972 true, point *ROOT_PLT at the target-independent reference count/offset
2973 union and *ARM_PLT at the ARM-specific information. */
2974
2975 static bfd_boolean
2976 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
2977 unsigned long r_symndx, union gotplt_union **root_plt,
2978 struct arm_plt_info **arm_plt)
2979 {
2980 struct arm_local_iplt_info *local_iplt;
2981
2982 if (h != NULL)
2983 {
2984 *root_plt = &h->root.plt;
2985 *arm_plt = &h->plt;
2986 return TRUE;
2987 }
2988
2989 if (elf32_arm_local_iplt (abfd) == NULL)
2990 return FALSE;
2991
2992 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
2993 if (local_iplt == NULL)
2994 return FALSE;
2995
2996 *root_plt = &local_iplt->root;
2997 *arm_plt = &local_iplt->arm;
2998 return TRUE;
2999 }
3000
3001 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3002 before it. */
3003
3004 static bfd_boolean
3005 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3006 struct arm_plt_info *arm_plt)
3007 {
3008 struct elf32_arm_link_hash_table *htab;
3009
3010 htab = elf32_arm_hash_table (info);
3011 return (arm_plt->thumb_refcount != 0
3012 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3013 }
3014
3015 /* Return a pointer to the head of the dynamic reloc list that should
3016 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3017 ABFD's symbol table. Return null if an error occurs. */
3018
3019 static struct elf_dyn_relocs **
3020 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3021 Elf_Internal_Sym *isym)
3022 {
3023 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3024 {
3025 struct arm_local_iplt_info *local_iplt;
3026
3027 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3028 if (local_iplt == NULL)
3029 return NULL;
3030 return &local_iplt->dyn_relocs;
3031 }
3032 else
3033 {
3034 /* Track dynamic relocs needed for local syms too.
3035 We really need local syms available to do this
3036 easily. Oh well. */
3037 asection *s;
3038 void *vpp;
3039
3040 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3041 if (s == NULL)
3042 abort ();
3043
3044 vpp = &elf_section_data (s)->local_dynrel;
3045 return (struct elf_dyn_relocs **) vpp;
3046 }
3047 }
3048
3049 /* Initialize an entry in the stub hash table. */
3050
3051 static struct bfd_hash_entry *
3052 stub_hash_newfunc (struct bfd_hash_entry *entry,
3053 struct bfd_hash_table *table,
3054 const char *string)
3055 {
3056 /* Allocate the structure if it has not already been allocated by a
3057 subclass. */
3058 if (entry == NULL)
3059 {
3060 entry = (struct bfd_hash_entry *)
3061 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3062 if (entry == NULL)
3063 return entry;
3064 }
3065
3066 /* Call the allocation method of the superclass. */
3067 entry = bfd_hash_newfunc (entry, table, string);
3068 if (entry != NULL)
3069 {
3070 struct elf32_arm_stub_hash_entry *eh;
3071
3072 /* Initialize the local fields. */
3073 eh = (struct elf32_arm_stub_hash_entry *) entry;
3074 eh->stub_sec = NULL;
3075 eh->stub_offset = 0;
3076 eh->target_value = 0;
3077 eh->target_section = NULL;
3078 eh->target_addend = 0;
3079 eh->orig_insn = 0;
3080 eh->stub_type = arm_stub_none;
3081 eh->stub_size = 0;
3082 eh->stub_template = NULL;
3083 eh->stub_template_size = 0;
3084 eh->h = NULL;
3085 eh->id_sec = NULL;
3086 eh->output_name = NULL;
3087 }
3088
3089 return entry;
3090 }
3091
3092 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3093 shortcuts to them in our hash table. */
3094
3095 static bfd_boolean
3096 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3097 {
3098 struct elf32_arm_link_hash_table *htab;
3099
3100 htab = elf32_arm_hash_table (info);
3101 if (htab == NULL)
3102 return FALSE;
3103
3104 /* BPABI objects never have a GOT, or associated sections. */
3105 if (htab->symbian_p)
3106 return TRUE;
3107
3108 if (! _bfd_elf_create_got_section (dynobj, info))
3109 return FALSE;
3110
3111 return TRUE;
3112 }
3113
3114 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3115
3116 static bfd_boolean
3117 create_ifunc_sections (struct bfd_link_info *info)
3118 {
3119 struct elf32_arm_link_hash_table *htab;
3120 const struct elf_backend_data *bed;
3121 bfd *dynobj;
3122 asection *s;
3123 flagword flags;
3124
3125 htab = elf32_arm_hash_table (info);
3126 dynobj = htab->root.dynobj;
3127 bed = get_elf_backend_data (dynobj);
3128 flags = bed->dynamic_sec_flags;
3129
3130 if (htab->root.iplt == NULL)
3131 {
3132 s = bfd_make_section_with_flags (dynobj, ".iplt",
3133 flags | SEC_READONLY | SEC_CODE);
3134 if (s == NULL
3135 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3136 return FALSE;
3137 htab->root.iplt = s;
3138 }
3139
3140 if (htab->root.irelplt == NULL)
3141 {
3142 s = bfd_make_section_with_flags (dynobj, RELOC_SECTION (htab, ".iplt"),
3143 flags | SEC_READONLY);
3144 if (s == NULL
3145 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3146 return FALSE;
3147 htab->root.irelplt = s;
3148 }
3149
3150 if (htab->root.igotplt == NULL)
3151 {
3152 s = bfd_make_section_with_flags (dynobj, ".igot.plt", flags);
3153 if (s == NULL
3154 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3155 return FALSE;
3156 htab->root.igotplt = s;
3157 }
3158 return TRUE;
3159 }
3160
3161 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3162 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3163 hash table. */
3164
3165 static bfd_boolean
3166 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3167 {
3168 struct elf32_arm_link_hash_table *htab;
3169
3170 htab = elf32_arm_hash_table (info);
3171 if (htab == NULL)
3172 return FALSE;
3173
3174 if (!htab->root.sgot && !create_got_section (dynobj, info))
3175 return FALSE;
3176
3177 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3178 return FALSE;
3179
3180 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
3181 if (!info->shared)
3182 htab->srelbss = bfd_get_section_by_name (dynobj,
3183 RELOC_SECTION (htab, ".bss"));
3184
3185 if (htab->vxworks_p)
3186 {
3187 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3188 return FALSE;
3189
3190 if (info->shared)
3191 {
3192 htab->plt_header_size = 0;
3193 htab->plt_entry_size
3194 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3195 }
3196 else
3197 {
3198 htab->plt_header_size
3199 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3200 htab->plt_entry_size
3201 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3202 }
3203 }
3204
3205 if (!htab->root.splt
3206 || !htab->root.srelplt
3207 || !htab->sdynbss
3208 || (!info->shared && !htab->srelbss))
3209 abort ();
3210
3211 return TRUE;
3212 }
3213
3214 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3215
3216 static void
3217 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3218 struct elf_link_hash_entry *dir,
3219 struct elf_link_hash_entry *ind)
3220 {
3221 struct elf32_arm_link_hash_entry *edir, *eind;
3222
3223 edir = (struct elf32_arm_link_hash_entry *) dir;
3224 eind = (struct elf32_arm_link_hash_entry *) ind;
3225
3226 if (eind->dyn_relocs != NULL)
3227 {
3228 if (edir->dyn_relocs != NULL)
3229 {
3230 struct elf_dyn_relocs **pp;
3231 struct elf_dyn_relocs *p;
3232
3233 /* Add reloc counts against the indirect sym to the direct sym
3234 list. Merge any entries against the same section. */
3235 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3236 {
3237 struct elf_dyn_relocs *q;
3238
3239 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3240 if (q->sec == p->sec)
3241 {
3242 q->pc_count += p->pc_count;
3243 q->count += p->count;
3244 *pp = p->next;
3245 break;
3246 }
3247 if (q == NULL)
3248 pp = &p->next;
3249 }
3250 *pp = edir->dyn_relocs;
3251 }
3252
3253 edir->dyn_relocs = eind->dyn_relocs;
3254 eind->dyn_relocs = NULL;
3255 }
3256
3257 if (ind->root.type == bfd_link_hash_indirect)
3258 {
3259 /* Copy over PLT info. */
3260 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3261 eind->plt.thumb_refcount = 0;
3262 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3263 eind->plt.maybe_thumb_refcount = 0;
3264 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3265 eind->plt.noncall_refcount = 0;
3266
3267 /* We should only allocate a function to .iplt once the final
3268 symbol information is known. */
3269 BFD_ASSERT (!eind->is_iplt);
3270
3271 if (dir->got.refcount <= 0)
3272 {
3273 edir->tls_type = eind->tls_type;
3274 eind->tls_type = GOT_UNKNOWN;
3275 }
3276 }
3277
3278 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3279 }
3280
3281 /* Create an ARM elf linker hash table. */
3282
3283 static struct bfd_link_hash_table *
3284 elf32_arm_link_hash_table_create (bfd *abfd)
3285 {
3286 struct elf32_arm_link_hash_table *ret;
3287 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3288
3289 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3290 if (ret == NULL)
3291 return NULL;
3292
3293 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3294 elf32_arm_link_hash_newfunc,
3295 sizeof (struct elf32_arm_link_hash_entry),
3296 ARM_ELF_DATA))
3297 {
3298 free (ret);
3299 return NULL;
3300 }
3301
3302 ret->sdynbss = NULL;
3303 ret->srelbss = NULL;
3304 ret->srelplt2 = NULL;
3305 ret->dt_tlsdesc_plt = 0;
3306 ret->dt_tlsdesc_got = 0;
3307 ret->tls_trampoline = 0;
3308 ret->next_tls_desc_index = 0;
3309 ret->num_tls_desc = 0;
3310 ret->thumb_glue_size = 0;
3311 ret->arm_glue_size = 0;
3312 ret->bx_glue_size = 0;
3313 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3314 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3315 ret->vfp11_erratum_glue_size = 0;
3316 ret->num_vfp11_fixes = 0;
3317 ret->fix_cortex_a8 = 0;
3318 ret->bfd_of_glue_owner = NULL;
3319 ret->byteswap_code = 0;
3320 ret->target1_is_rel = 0;
3321 ret->target2_reloc = R_ARM_NONE;
3322 #ifdef FOUR_WORD_PLT
3323 ret->plt_header_size = 16;
3324 ret->plt_entry_size = 16;
3325 #else
3326 ret->plt_header_size = 20;
3327 ret->plt_entry_size = 12;
3328 #endif
3329 ret->fix_v4bx = 0;
3330 ret->use_blx = 0;
3331 ret->vxworks_p = 0;
3332 ret->symbian_p = 0;
3333 ret->use_rel = 1;
3334 ret->sym_cache.abfd = NULL;
3335 ret->obfd = abfd;
3336 ret->tls_ldm_got.refcount = 0;
3337 ret->stub_bfd = NULL;
3338 ret->add_stub_section = NULL;
3339 ret->layout_sections_again = NULL;
3340 ret->stub_group = NULL;
3341 ret->top_id = 0;
3342 ret->bfd_count = 0;
3343 ret->top_index = 0;
3344 ret->input_list = NULL;
3345
3346 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3347 sizeof (struct elf32_arm_stub_hash_entry)))
3348 {
3349 free (ret);
3350 return NULL;
3351 }
3352
3353 return &ret->root.root;
3354 }
3355
3356 /* Free the derived linker hash table. */
3357
3358 static void
3359 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3360 {
3361 struct elf32_arm_link_hash_table *ret
3362 = (struct elf32_arm_link_hash_table *) hash;
3363
3364 bfd_hash_table_free (&ret->stub_hash_table);
3365 _bfd_generic_link_hash_table_free (hash);
3366 }
3367
3368 /* Determine if we're dealing with a Thumb only architecture. */
3369
3370 static bfd_boolean
3371 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3372 {
3373 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3374 Tag_CPU_arch);
3375 int profile;
3376
3377 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3378 return TRUE;
3379
3380 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3381 return FALSE;
3382
3383 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3384 Tag_CPU_arch_profile);
3385
3386 return profile == 'M';
3387 }
3388
3389 /* Determine if we're dealing with a Thumb-2 object. */
3390
3391 static bfd_boolean
3392 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3393 {
3394 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3395 Tag_CPU_arch);
3396 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3397 }
3398
3399 /* Determine what kind of NOPs are available. */
3400
3401 static bfd_boolean
3402 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3403 {
3404 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3405 Tag_CPU_arch);
3406 return arch == TAG_CPU_ARCH_V6T2
3407 || arch == TAG_CPU_ARCH_V6K
3408 || arch == TAG_CPU_ARCH_V7
3409 || arch == TAG_CPU_ARCH_V7E_M;
3410 }
3411
3412 static bfd_boolean
3413 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3414 {
3415 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3416 Tag_CPU_arch);
3417 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3418 || arch == TAG_CPU_ARCH_V7E_M);
3419 }
3420
3421 static bfd_boolean
3422 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3423 {
3424 switch (stub_type)
3425 {
3426 case arm_stub_long_branch_thumb_only:
3427 case arm_stub_long_branch_v4t_thumb_arm:
3428 case arm_stub_short_branch_v4t_thumb_arm:
3429 case arm_stub_long_branch_v4t_thumb_arm_pic:
3430 case arm_stub_long_branch_thumb_only_pic:
3431 return TRUE;
3432 case arm_stub_none:
3433 BFD_FAIL ();
3434 return FALSE;
3435 break;
3436 default:
3437 return FALSE;
3438 }
3439 }
3440
3441 /* Determine the type of stub needed, if any, for a call. */
3442
3443 static enum elf32_arm_stub_type
3444 arm_type_of_stub (struct bfd_link_info *info,
3445 asection *input_sec,
3446 const Elf_Internal_Rela *rel,
3447 unsigned char st_type,
3448 enum arm_st_branch_type *actual_branch_type,
3449 struct elf32_arm_link_hash_entry *hash,
3450 bfd_vma destination,
3451 asection *sym_sec,
3452 bfd *input_bfd,
3453 const char *name)
3454 {
3455 bfd_vma location;
3456 bfd_signed_vma branch_offset;
3457 unsigned int r_type;
3458 struct elf32_arm_link_hash_table * globals;
3459 int thumb2;
3460 int thumb_only;
3461 enum elf32_arm_stub_type stub_type = arm_stub_none;
3462 int use_plt = 0;
3463 enum arm_st_branch_type branch_type = *actual_branch_type;
3464 union gotplt_union *root_plt;
3465 struct arm_plt_info *arm_plt;
3466
3467 if (branch_type == ST_BRANCH_LONG)
3468 return stub_type;
3469
3470 globals = elf32_arm_hash_table (info);
3471 if (globals == NULL)
3472 return stub_type;
3473
3474 thumb_only = using_thumb_only (globals);
3475
3476 thumb2 = using_thumb2 (globals);
3477
3478 /* Determine where the call point is. */
3479 location = (input_sec->output_offset
3480 + input_sec->output_section->vma
3481 + rel->r_offset);
3482
3483 r_type = ELF32_R_TYPE (rel->r_info);
3484
3485 /* For TLS call relocs, it is the caller's responsibility to provide
3486 the address of the appropriate trampoline. */
3487 if (r_type != R_ARM_TLS_CALL
3488 && r_type != R_ARM_THM_TLS_CALL
3489 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3490 &root_plt, &arm_plt)
3491 && root_plt->offset != (bfd_vma) -1)
3492 {
3493 asection *splt;
3494
3495 if (hash == NULL || hash->is_iplt)
3496 splt = globals->root.iplt;
3497 else
3498 splt = globals->root.splt;
3499 if (splt != NULL)
3500 {
3501 use_plt = 1;
3502
3503 /* Note when dealing with PLT entries: the main PLT stub is in
3504 ARM mode, so if the branch is in Thumb mode, another
3505 Thumb->ARM stub will be inserted later just before the ARM
3506 PLT stub. We don't take this extra distance into account
3507 here: if a long branch stub is needed, we'll add a
3508 Thumb->ARM one that branches directly to the ARM PLT entry,
3509 which avoids spreading offset corrections over several
3510 places. */
3511
3512 destination = (splt->output_section->vma
3513 + splt->output_offset
3514 + root_plt->offset);
3515 st_type = STT_FUNC;
3516 branch_type = ST_BRANCH_TO_ARM;
3517 }
3518 }
3519 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3520 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3521
3522 branch_offset = (bfd_signed_vma)(destination - location);
3523
3524 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3525 || r_type == R_ARM_THM_TLS_CALL)
3526 {
3527 /* Handle cases where:
3528 - this call goes too far (different Thumb/Thumb2 max
3529 distance)
3530 - it's a Thumb->Arm call and blx is not available, or it's a
3531 Thumb->Arm branch (not bl). A stub is needed in this case,
3532 but only if this call is not through a PLT entry. Indeed,
3533 PLT stubs handle mode switching already.
3534 */
3535 if ((!thumb2
3536 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3537 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3538 || (thumb2
3539 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3540 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3541 || (branch_type == ST_BRANCH_TO_ARM
3542 && (((r_type == R_ARM_THM_CALL
3543 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3544 || (r_type == R_ARM_THM_JUMP24))
3545 && !use_plt))
3546 {
3547 if (branch_type == ST_BRANCH_TO_THUMB)
3548 {
3549 /* Thumb to thumb. */
3550 if (!thumb_only)
3551 {
3552 stub_type = (info->shared | globals->pic_veneer)
3553 /* PIC stubs. */
3554 ? ((globals->use_blx
3555 && (r_type == R_ARM_THM_CALL))
3556 /* V5T and above. Stub starts with ARM code, so
3557 we must be able to switch mode before
3558 reaching it, which is only possible for 'bl'
3559 (i.e. the R_ARM_THM_CALL relocation). */
3560 ? arm_stub_long_branch_any_thumb_pic
3561 /* On V4T, use Thumb code only. */
3562 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3563
3564 /* non-PIC stubs. */
3565 : ((globals->use_blx
3566 && (r_type == R_ARM_THM_CALL))
3567 /* V5T and above. */
3568 ? arm_stub_long_branch_any_any
3569 /* V4T. */
3570 : arm_stub_long_branch_v4t_thumb_thumb);
3571 }
3572 else
3573 {
3574 stub_type = (info->shared | globals->pic_veneer)
3575 /* PIC stub. */
3576 ? arm_stub_long_branch_thumb_only_pic
3577 /* non-PIC stub. */
3578 : arm_stub_long_branch_thumb_only;
3579 }
3580 }
3581 else
3582 {
3583 /* Thumb to arm. */
3584 if (sym_sec != NULL
3585 && sym_sec->owner != NULL
3586 && !INTERWORK_FLAG (sym_sec->owner))
3587 {
3588 (*_bfd_error_handler)
3589 (_("%B(%s): warning: interworking not enabled.\n"
3590 " first occurrence: %B: Thumb call to ARM"),
3591 sym_sec->owner, input_bfd, name);
3592 }
3593
3594 stub_type =
3595 (info->shared | globals->pic_veneer)
3596 /* PIC stubs. */
3597 ? (r_type == R_ARM_THM_TLS_CALL
3598 /* TLS PIC stubs */
3599 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3600 : arm_stub_long_branch_v4t_thumb_tls_pic)
3601 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3602 /* V5T PIC and above. */
3603 ? arm_stub_long_branch_any_arm_pic
3604 /* V4T PIC stub. */
3605 : arm_stub_long_branch_v4t_thumb_arm_pic))
3606
3607 /* non-PIC stubs. */
3608 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3609 /* V5T and above. */
3610 ? arm_stub_long_branch_any_any
3611 /* V4T. */
3612 : arm_stub_long_branch_v4t_thumb_arm);
3613
3614 /* Handle v4t short branches. */
3615 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3616 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3617 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3618 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3619 }
3620 }
3621 }
3622 else if (r_type == R_ARM_CALL
3623 || r_type == R_ARM_JUMP24
3624 || r_type == R_ARM_PLT32
3625 || r_type == R_ARM_TLS_CALL)
3626 {
3627 if (branch_type == ST_BRANCH_TO_THUMB)
3628 {
3629 /* Arm to thumb. */
3630
3631 if (sym_sec != NULL
3632 && sym_sec->owner != NULL
3633 && !INTERWORK_FLAG (sym_sec->owner))
3634 {
3635 (*_bfd_error_handler)
3636 (_("%B(%s): warning: interworking not enabled.\n"
3637 " first occurrence: %B: ARM call to Thumb"),
3638 sym_sec->owner, input_bfd, name);
3639 }
3640
3641 /* We have an extra 2 bytes of reach because of
3642 the mode change (bit 24 (H) of the BLX encoding). */
3643 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3644 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3645 || (r_type == R_ARM_CALL && !globals->use_blx)
3646 || (r_type == R_ARM_JUMP24)
3647 || (r_type == R_ARM_PLT32))
3648 {
3649 stub_type = (info->shared | globals->pic_veneer)
3650 /* PIC stubs. */
3651 ? ((globals->use_blx)
3652 /* V5T and above. */
3653 ? arm_stub_long_branch_any_thumb_pic
3654 /* V4T stub. */
3655 : arm_stub_long_branch_v4t_arm_thumb_pic)
3656
3657 /* non-PIC stubs. */
3658 : ((globals->use_blx)
3659 /* V5T and above. */
3660 ? arm_stub_long_branch_any_any
3661 /* V4T. */
3662 : arm_stub_long_branch_v4t_arm_thumb);
3663 }
3664 }
3665 else
3666 {
3667 /* Arm to arm. */
3668 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3669 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3670 {
3671 stub_type =
3672 (info->shared | globals->pic_veneer)
3673 /* PIC stubs. */
3674 ? (r_type == R_ARM_TLS_CALL
3675 /* TLS PIC Stub */
3676 ? arm_stub_long_branch_any_tls_pic
3677 : arm_stub_long_branch_any_arm_pic)
3678 /* non-PIC stubs. */
3679 : arm_stub_long_branch_any_any;
3680 }
3681 }
3682 }
3683
3684 /* If a stub is needed, record the actual destination type. */
3685 if (stub_type != arm_stub_none)
3686 *actual_branch_type = branch_type;
3687
3688 return stub_type;
3689 }
3690
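/* Illustrative sketch only, not referenced by the build: the basic range
   test arm_type_of_stub applies to a Thumb-2 BL/B.W.  A long-branch stub
   becomes necessary once the signed branch offset falls outside the range
   bounded by THM2_MAX_BWD_BRANCH_OFFSET and THM2_MAX_FWD_BRANCH_OFFSET
   (defined earlier in this file).  The helper name is hypothetical.  */

static bfd_boolean
thumb2_branch_needs_stub_example (bfd_vma location, bfd_vma destination)
{
  bfd_signed_vma branch_offset = (bfd_signed_vma) (destination - location);

  return (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
	  || branch_offset < THM2_MAX_BWD_BRANCH_OFFSET);
}
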
3691 /* Build a name for an entry in the stub hash table. */
3692
3693 static char *
3694 elf32_arm_stub_name (const asection *input_section,
3695 const asection *sym_sec,
3696 const struct elf32_arm_link_hash_entry *hash,
3697 const Elf_Internal_Rela *rel,
3698 enum elf32_arm_stub_type stub_type)
3699 {
3700 char *stub_name;
3701 bfd_size_type len;
3702
3703 if (hash)
3704 {
3705 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3706 stub_name = (char *) bfd_malloc (len);
3707 if (stub_name != NULL)
3708 sprintf (stub_name, "%08x_%s+%x_%d",
3709 input_section->id & 0xffffffff,
3710 hash->root.root.root.string,
3711 (int) rel->r_addend & 0xffffffff,
3712 (int) stub_type);
3713 }
3714 else
3715 {
3716 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3717 stub_name = (char *) bfd_malloc (len);
3718 if (stub_name != NULL)
3719 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3720 input_section->id & 0xffffffff,
3721 sym_sec->id & 0xffffffff,
3722 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3723 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3724 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3725 (int) rel->r_addend & 0xffffffff,
3726 (int) stub_type);
3727 }
3728
3729 return stub_name;
3730 }
3731
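/* Illustrative sketch only, not referenced by the build: the names created
   above look like "<section id>_<symbol>+<addend>_<stub type>" for global
   symbols (e.g. "0000002a_printf+0_3") and
   "<section id>_<symbol section id>:<symbol index>+<addend>_<stub type>"
   for local symbols.  The helper name and the values below are made up.  */

static void
stub_name_format_example (char *buf)
{
  /* BUF must provide at least the worst-case length computed in
     elf32_arm_stub_name (8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1 bytes).
     Hypothetical local symbol: section 0x2a, symbol section 0x1f,
     symbol index 5, addend 0, stub type 3.  */
  sprintf (buf, "%08x_%x:%x+%x_%d", 0x2a, 0x1f, 5, 0, 3);
}
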
3732 /* Look up an entry in the stub hash. Stub entries are cached because
3733 creating the stub name takes a bit of time. */
3734
3735 static struct elf32_arm_stub_hash_entry *
3736 elf32_arm_get_stub_entry (const asection *input_section,
3737 const asection *sym_sec,
3738 struct elf_link_hash_entry *hash,
3739 const Elf_Internal_Rela *rel,
3740 struct elf32_arm_link_hash_table *htab,
3741 enum elf32_arm_stub_type stub_type)
3742 {
3743 struct elf32_arm_stub_hash_entry *stub_entry;
3744 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3745 const asection *id_sec;
3746
3747 if ((input_section->flags & SEC_CODE) == 0)
3748 return NULL;
3749
3750 /* If this input section is part of a group of sections sharing one
3751 stub section, then use the id of the first section in the group.
3752 Stub names need to include a section id, as there may well be
3753 more than one stub used to reach, say, printf, and we need to
3754 distinguish between them. */
3755 id_sec = htab->stub_group[input_section->id].link_sec;
3756
3757 if (h != NULL && h->stub_cache != NULL
3758 && h->stub_cache->h == h
3759 && h->stub_cache->id_sec == id_sec
3760 && h->stub_cache->stub_type == stub_type)
3761 {
3762 stub_entry = h->stub_cache;
3763 }
3764 else
3765 {
3766 char *stub_name;
3767
3768 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3769 if (stub_name == NULL)
3770 return NULL;
3771
3772 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3773 stub_name, FALSE, FALSE);
3774 if (h != NULL)
3775 h->stub_cache = stub_entry;
3776
3777 free (stub_name);
3778 }
3779
3780 return stub_entry;
3781 }
3782
3783 /* Find or create a stub section. Returns a pointer to the stub section, and
3784 the section to which the stub section will be attached (in *LINK_SEC_P).
3785 LINK_SEC_P may be NULL. */
3786
3787 static asection *
3788 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3789 struct elf32_arm_link_hash_table *htab)
3790 {
3791 asection *link_sec;
3792 asection *stub_sec;
3793
3794 link_sec = htab->stub_group[section->id].link_sec;
3795 stub_sec = htab->stub_group[section->id].stub_sec;
3796 if (stub_sec == NULL)
3797 {
3798 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3799 if (stub_sec == NULL)
3800 {
3801 size_t namelen;
3802 bfd_size_type len;
3803 char *s_name;
3804
3805 namelen = strlen (link_sec->name);
3806 len = namelen + sizeof (STUB_SUFFIX);
3807 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3808 if (s_name == NULL)
3809 return NULL;
3810
3811 memcpy (s_name, link_sec->name, namelen);
3812 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3813 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3814 if (stub_sec == NULL)
3815 return NULL;
3816 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3817 }
3818 htab->stub_group[section->id].stub_sec = stub_sec;
3819 }
3820
3821 if (link_sec_p)
3822 *link_sec_p = link_sec;
3823
3824 return stub_sec;
3825 }
3826
3827 /* Add a new stub entry to the stub hash. Not all fields of the new
3828 stub entry are initialised. */
3829
3830 static struct elf32_arm_stub_hash_entry *
3831 elf32_arm_add_stub (const char *stub_name,
3832 asection *section,
3833 struct elf32_arm_link_hash_table *htab)
3834 {
3835 asection *link_sec;
3836 asection *stub_sec;
3837 struct elf32_arm_stub_hash_entry *stub_entry;
3838
3839 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3840 if (stub_sec == NULL)
3841 return NULL;
3842
3843 /* Enter this entry into the linker stub hash table. */
3844 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3845 TRUE, FALSE);
3846 if (stub_entry == NULL)
3847 {
3848 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3849 section->owner,
3850 stub_name);
3851 return NULL;
3852 }
3853
3854 stub_entry->stub_sec = stub_sec;
3855 stub_entry->stub_offset = 0;
3856 stub_entry->id_sec = link_sec;
3857
3858 return stub_entry;
3859 }
3860
3861 /* Store an Arm insn into an output section not processed by
3862 elf32_arm_write_section. */
3863
3864 static void
3865 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3866 bfd * output_bfd, bfd_vma val, void * ptr)
3867 {
3868 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3869 bfd_putl32 (val, ptr);
3870 else
3871 bfd_putb32 (val, ptr);
3872 }
3873
3874 /* Store a 16-bit Thumb insn into an output section not processed by
3875 elf32_arm_write_section. */
3876
3877 static void
3878 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3879 bfd * output_bfd, bfd_vma val, void * ptr)
3880 {
3881 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3882 bfd_putl16 (val, ptr);
3883 else
3884 bfd_putb16 (val, ptr);
3885 }
3886
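/* Illustrative sketch only, not referenced by the build: instructions
   emitted through the two helpers above follow the output BFD's byte order
   unless byteswap_code is set, in which case the opposite order is used
   (code and data endianness differ).  The helper name is hypothetical.  */

static void
put_arm_insn_example (struct elf32_arm_link_hash_table *htab,
		      bfd *output_bfd)
{
  bfd_byte buf[4];

  /* Store the ARM NOP 0xe320f000 in the output's code byte order.  */
  put_arm_insn (htab, output_bfd, (bfd_vma) 0xe320f000, buf);
}
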
3887 /* If it's possible to change R_TYPE to a more efficient access
3888 model, return the new reloc type. */
3889
3890 static unsigned
3891 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
3892 struct elf_link_hash_entry *h)
3893 {
3894 int is_local = (h == NULL);
3895
3896 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
3897 return r_type;
3898
3899 /* We do not support relaxations for the old TLS models. */
3900 switch (r_type)
3901 {
3902 case R_ARM_TLS_GOTDESC:
3903 case R_ARM_TLS_CALL:
3904 case R_ARM_THM_TLS_CALL:
3905 case R_ARM_TLS_DESCSEQ:
3906 case R_ARM_THM_TLS_DESCSEQ:
3907 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
3908 }
3909
3910 return r_type;
3911 }
3912
3913 static bfd_reloc_status_type elf32_arm_final_link_relocate
3914 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3915 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3916 const char *, unsigned char, enum arm_st_branch_type,
3917 struct elf_link_hash_entry *, bfd_boolean *, char **);
3918
3919 static unsigned int
3920 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
3921 {
3922 switch (stub_type)
3923 {
3924 case arm_stub_a8_veneer_b_cond:
3925 case arm_stub_a8_veneer_b:
3926 case arm_stub_a8_veneer_bl:
3927 return 2;
3928
3929 case arm_stub_long_branch_any_any:
3930 case arm_stub_long_branch_v4t_arm_thumb:
3931 case arm_stub_long_branch_thumb_only:
3932 case arm_stub_long_branch_v4t_thumb_thumb:
3933 case arm_stub_long_branch_v4t_thumb_arm:
3934 case arm_stub_short_branch_v4t_thumb_arm:
3935 case arm_stub_long_branch_any_arm_pic:
3936 case arm_stub_long_branch_any_thumb_pic:
3937 case arm_stub_long_branch_v4t_thumb_thumb_pic:
3938 case arm_stub_long_branch_v4t_arm_thumb_pic:
3939 case arm_stub_long_branch_v4t_thumb_arm_pic:
3940 case arm_stub_long_branch_thumb_only_pic:
3941 case arm_stub_long_branch_any_tls_pic:
3942 case arm_stub_long_branch_v4t_thumb_tls_pic:
3943 case arm_stub_a8_veneer_blx:
3944 return 4;
3945
3946 default:
3947 abort (); /* Should be unreachable. */
3948 }
3949 }
3950
3951 static bfd_boolean
3952 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3953 void * in_arg)
3954 {
3955 #define MAXRELOCS 2
3956 struct elf32_arm_stub_hash_entry *stub_entry;
3957 struct elf32_arm_link_hash_table *globals;
3958 struct bfd_link_info *info;
3959 asection *stub_sec;
3960 bfd *stub_bfd;
3961 bfd_byte *loc;
3962 bfd_vma sym_value;
3963 int template_size;
3964 int size;
3965 const insn_sequence *template_sequence;
3966 int i;
3967 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3968 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3969 int nrelocs = 0;
3970
3971 /* Massage our args to the form they really have. */
3972 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3973 info = (struct bfd_link_info *) in_arg;
3974
3975 globals = elf32_arm_hash_table (info);
3976 if (globals == NULL)
3977 return FALSE;
3978
3979 stub_sec = stub_entry->stub_sec;
3980
3981 if ((globals->fix_cortex_a8 < 0)
3982 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
3983 /* We have to do less-strictly-aligned fixes last. */
3984 return TRUE;
3985
3986 /* Make a note of the offset within the stubs for this entry. */
3987 stub_entry->stub_offset = stub_sec->size;
3988 loc = stub_sec->contents + stub_entry->stub_offset;
3989
3990 stub_bfd = stub_sec->owner;
3991
3992 /* This is the address of the stub destination. */
3993 sym_value = (stub_entry->target_value
3994 + stub_entry->target_section->output_offset
3995 + stub_entry->target_section->output_section->vma);
3996
3997 template_sequence = stub_entry->stub_template;
3998 template_size = stub_entry->stub_template_size;
3999
4000 size = 0;
4001 for (i = 0; i < template_size; i++)
4002 {
4003 switch (template_sequence[i].type)
4004 {
4005 case THUMB16_TYPE:
4006 {
4007 bfd_vma data = (bfd_vma) template_sequence[i].data;
4008 if (template_sequence[i].reloc_addend != 0)
4009 {
4010 /* We've borrowed the reloc_addend field to mean we should
4011 insert a condition code into this (Thumb-1 branch)
4012 instruction. See THUMB16_BCOND_INSN. */
4013 BFD_ASSERT ((data & 0xff00) == 0xd000);
4014 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4015 }
4016 bfd_put_16 (stub_bfd, data, loc + size);
4017 size += 2;
4018 }
4019 break;
4020
4021 case THUMB32_TYPE:
4022 bfd_put_16 (stub_bfd,
4023 (template_sequence[i].data >> 16) & 0xffff,
4024 loc + size);
4025 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4026 loc + size + 2);
4027 if (template_sequence[i].r_type != R_ARM_NONE)
4028 {
4029 stub_reloc_idx[nrelocs] = i;
4030 stub_reloc_offset[nrelocs++] = size;
4031 }
4032 size += 4;
4033 break;
4034
4035 case ARM_TYPE:
4036 bfd_put_32 (stub_bfd, template_sequence[i].data,
4037 loc + size);
4038 /* Handle cases where the target is encoded within the
4039 instruction. */
4040 if (template_sequence[i].r_type == R_ARM_JUMP24)
4041 {
4042 stub_reloc_idx[nrelocs] = i;
4043 stub_reloc_offset[nrelocs++] = size;
4044 }
4045 size += 4;
4046 break;
4047
4048 case DATA_TYPE:
4049 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4050 stub_reloc_idx[nrelocs] = i;
4051 stub_reloc_offset[nrelocs++] = size;
4052 size += 4;
4053 break;
4054
4055 default:
4056 BFD_FAIL ();
4057 return FALSE;
4058 }
4059 }
4060
4061 stub_sec->size += size;
4062
4063 /* Stub size has already been computed in arm_size_one_stub. Check
4064 consistency. */
4065 BFD_ASSERT (size == stub_entry->stub_size);
4066
4067 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4068 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4069 sym_value |= 1;
4070
4071 /* Assume there is at least one entry, and at most MAXRELOCS entries,
4072 to relocate in each stub. */
4073 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4074
4075 for (i = 0; i < nrelocs; i++)
4076 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4077 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4078 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4079 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4080 {
4081 Elf_Internal_Rela rel;
4082 bfd_boolean unresolved_reloc;
4083 char *error_message;
4084 enum arm_st_branch_type branch_type
4085 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4086 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4087 bfd_vma points_to = sym_value + stub_entry->target_addend;
4088
4089 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4090 rel.r_info = ELF32_R_INFO (0,
4091 template_sequence[stub_reloc_idx[i]].r_type);
4092 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4093
4094 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4095 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4096 template should refer back to the instruction after the original
4097 branch. */
4098 points_to = sym_value;
4099
4100 /* There may be unintended consequences if this is not true. */
4101 BFD_ASSERT (stub_entry->h == NULL);
4102
4103 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4104 properly. We should probably use elf32_arm_final_link_relocate
4105 unconditionally, rather than only for the relocations listed in the
4106 enclosing conditional, for the sake of consistency. */
4107 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4108 (template_sequence[stub_reloc_idx[i]].r_type),
4109 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4110 points_to, info, stub_entry->target_section, "", STT_FUNC,
4111 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4112 &unresolved_reloc, &error_message);
4113 }
4114 else
4115 {
4116 Elf_Internal_Rela rel;
4117 bfd_boolean unresolved_reloc;
4118 char *error_message;
4119 bfd_vma points_to = sym_value + stub_entry->target_addend
4120 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4121
4122 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4123 rel.r_info = ELF32_R_INFO (0,
4124 template_sequence[stub_reloc_idx[i]].r_type);
4125 rel.r_addend = 0;
4126
4127 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4128 (template_sequence[stub_reloc_idx[i]].r_type),
4129 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4130 points_to, info, stub_entry->target_section, "", STT_FUNC,
4131 stub_entry->branch_type,
4132 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4133 &error_message);
4134 }
4135
4136 return TRUE;
4137 #undef MAXRELOCS
4138 }
4139
4140 /* Calculate the template, template size and instruction size for a stub.
4141 Return value is the instruction size. */
4142
4143 static unsigned int
4144 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4145 const insn_sequence **stub_template,
4146 int *stub_template_size)
4147 {
4148 const insn_sequence *template_sequence = NULL;
4149 int template_size = 0, i;
4150 unsigned int size;
4151
4152 template_sequence = stub_definitions[stub_type].template_sequence;
4153 if (stub_template)
4154 *stub_template = template_sequence;
4155
4156 template_size = stub_definitions[stub_type].template_size;
4157 if (stub_template_size)
4158 *stub_template_size = template_size;
4159
4160 size = 0;
4161 for (i = 0; i < template_size; i++)
4162 {
4163 switch (template_sequence[i].type)
4164 {
4165 case THUMB16_TYPE:
4166 size += 2;
4167 break;
4168
4169 case ARM_TYPE:
4170 case THUMB32_TYPE:
4171 case DATA_TYPE:
4172 size += 4;
4173 break;
4174
4175 default:
4176 BFD_FAIL ();
4177 return 0;
4178 }
4179 }
4180
4181 return size;
4182 }
4183
4184 /* As above, but don't actually build the stub. Just bump offset so
4185 we know stub section sizes. */
4186
4187 static bfd_boolean
4188 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4189 void *in_arg ATTRIBUTE_UNUSED)
4190 {
4191 struct elf32_arm_stub_hash_entry *stub_entry;
4192 const insn_sequence *template_sequence;
4193 int template_size, size;
4194
4195 /* Massage our args to the form they really have. */
4196 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4197
4198 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4199 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4200
4201 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4202 &template_size);
4203
4204 stub_entry->stub_size = size;
4205 stub_entry->stub_template = template_sequence;
4206 stub_entry->stub_template_size = template_size;
4207
4208 size = (size + 7) & ~7;
4209 stub_entry->stub_sec->size += size;
4210
4211 return TRUE;
4212 }
4213
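/* Illustrative sketch only, not referenced by the build: the sizing rule
   used above.  THUMB16 template entries contribute 2 bytes; ARM, THUMB32
   and DATA entries contribute 4 bytes each; arm_size_one_stub then rounds
   the total up to a multiple of 8 when growing the stub section.  For
   example, a template of one THUMB16 and two DATA entries is 2 + 4 + 4
   = 10 bytes of contents but occupies 16 bytes of the stub section.  */

static unsigned int
stub_size_rounding_example (unsigned int insn_size)
{
  /* The same rounding expression as in arm_size_one_stub.  */
  return (insn_size + 7) & ~7u;
}
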
4214 /* External entry points for sizing and building linker stubs. */
4215
4216 /* Set up various things so that we can make a list of input sections
4217 for each output section included in the link. Returns -1 on error,
4218 0 when no stubs will be needed, and 1 on success. */
4219
4220 int
4221 elf32_arm_setup_section_lists (bfd *output_bfd,
4222 struct bfd_link_info *info)
4223 {
4224 bfd *input_bfd;
4225 unsigned int bfd_count;
4226 int top_id, top_index;
4227 asection *section;
4228 asection **input_list, **list;
4229 bfd_size_type amt;
4230 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4231
4232 if (htab == NULL)
4233 return 0;
4234 if (! is_elf_hash_table (htab))
4235 return 0;
4236
4237 /* Count the number of input BFDs and find the top input section id. */
4238 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4239 input_bfd != NULL;
4240 input_bfd = input_bfd->link_next)
4241 {
4242 bfd_count += 1;
4243 for (section = input_bfd->sections;
4244 section != NULL;
4245 section = section->next)
4246 {
4247 if (top_id < section->id)
4248 top_id = section->id;
4249 }
4250 }
4251 htab->bfd_count = bfd_count;
4252
4253 amt = sizeof (struct map_stub) * (top_id + 1);
4254 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4255 if (htab->stub_group == NULL)
4256 return -1;
4257 htab->top_id = top_id;
4258
4259 /* We can't use output_bfd->section_count here to find the top output
4260 section index as some sections may have been removed, and
4261 _bfd_strip_section_from_output doesn't renumber the indices. */
4262 for (section = output_bfd->sections, top_index = 0;
4263 section != NULL;
4264 section = section->next)
4265 {
4266 if (top_index < section->index)
4267 top_index = section->index;
4268 }
4269
4270 htab->top_index = top_index;
4271 amt = sizeof (asection *) * (top_index + 1);
4272 input_list = (asection **) bfd_malloc (amt);
4273 htab->input_list = input_list;
4274 if (input_list == NULL)
4275 return -1;
4276
4277 /* For sections we aren't interested in, mark their entries with a
4278 value we can check later. */
4279 list = input_list + top_index;
4280 do
4281 *list = bfd_abs_section_ptr;
4282 while (list-- != input_list);
4283
4284 for (section = output_bfd->sections;
4285 section != NULL;
4286 section = section->next)
4287 {
4288 if ((section->flags & SEC_CODE) != 0)
4289 input_list[section->index] = NULL;
4290 }
4291
4292 return 1;
4293 }
4294
4295 /* The linker repeatedly calls this function for each input section,
4296 in the order that input sections are linked into output sections.
4297 Build lists of input sections to determine groupings between which
4298 we may insert linker stubs. */
4299
4300 void
4301 elf32_arm_next_input_section (struct bfd_link_info *info,
4302 asection *isec)
4303 {
4304 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4305
4306 if (htab == NULL)
4307 return;
4308
4309 if (isec->output_section->index <= htab->top_index)
4310 {
4311 asection **list = htab->input_list + isec->output_section->index;
4312
4313 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4314 {
4315 /* Steal the link_sec pointer for our list. */
4316 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4317 /* This happens to make the list in reverse order,
4318 which we reverse later. */
4319 PREV_SEC (isec) = *list;
4320 *list = isec;
4321 }
4322 }
4323 }
4324
4325 /* See whether we can group stub sections together. Grouping stub
4326 sections may result in fewer stubs. More importantly, we need to
4327 put all .init* and .fini* stubs at the end of the .init or
4328 .fini output sections respectively, because glibc splits the
4329 _init and _fini functions into multiple parts. Putting a stub in
4330 the middle of a function is not a good idea. */
4331
4332 static void
4333 group_sections (struct elf32_arm_link_hash_table *htab,
4334 bfd_size_type stub_group_size,
4335 bfd_boolean stubs_always_after_branch)
4336 {
4337 asection **list = htab->input_list;
4338
4339 do
4340 {
4341 asection *tail = *list;
4342 asection *head;
4343
4344 if (tail == bfd_abs_section_ptr)
4345 continue;
4346
4347 /* Reverse the list: we must avoid placing stubs at the
4348 beginning of the section because the beginning of the text
4349 section may be required for an interrupt vector in bare metal
4350 code. */
4351 #define NEXT_SEC PREV_SEC
4352 head = NULL;
4353 while (tail != NULL)
4354 {
4355 /* Pop from tail. */
4356 asection *item = tail;
4357 tail = PREV_SEC (item);
4358
4359 /* Push on head. */
4360 NEXT_SEC (item) = head;
4361 head = item;
4362 }
4363
4364 while (head != NULL)
4365 {
4366 asection *curr;
4367 asection *next;
4368 bfd_vma stub_group_start = head->output_offset;
4369 bfd_vma end_of_next;
4370
4371 curr = head;
4372 while (NEXT_SEC (curr) != NULL)
4373 {
4374 next = NEXT_SEC (curr);
4375 end_of_next = next->output_offset + next->size;
4376 if (end_of_next - stub_group_start >= stub_group_size)
4377 /* End of NEXT is too far from start, so stop. */
4378 break;
4379 /* Add NEXT to the group. */
4380 curr = next;
4381 }
4382
4383 /* OK, the size from the start to the start of CURR is less
4384 than stub_group_size and thus can be handled by one stub
4385 section. (Or the head section is itself larger than
4386 stub_group_size, in which case we may be toast.)
4387 We should really be keeping track of the total size of
4388 stubs added here, as stubs contribute to the final output
4389 section size. */
4390 do
4391 {
4392 next = NEXT_SEC (head);
4393 /* Set up this stub group. */
4394 htab->stub_group[head->id].link_sec = curr;
4395 }
4396 while (head != curr && (head = next) != NULL);
4397
4398 /* But wait, there's more! Input sections up to stub_group_size
4399 bytes after the stub section can be handled by it too. */
4400 if (!stubs_always_after_branch)
4401 {
4402 stub_group_start = curr->output_offset + curr->size;
4403
4404 while (next != NULL)
4405 {
4406 end_of_next = next->output_offset + next->size;
4407 if (end_of_next - stub_group_start >= stub_group_size)
4408 /* End of NEXT is too far from stubs, so stop. */
4409 break;
4410 /* Add NEXT to the stub group. */
4411 head = next;
4412 next = NEXT_SEC (head);
4413 htab->stub_group[head->id].link_sec = curr;
4414 }
4415 }
4416 head = next;
4417 }
4418 }
4419 while (list++ != htab->input_list + htab->top_index);
4420
4421 free (htab->input_list);
4422 #undef PREV_SEC
4423 #undef NEXT_SEC
4424 }
4425
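/* Illustrative sketch only, not referenced by the build: the grouping rule
   applied above, restated over plain offset/size arrays instead of
   asections.  Starting from the first ungrouped section, sections are
   added while their end stays within GROUP_SIZE bytes of the group start;
   each member is then pointed at the last section of the group, after
   which the shared stub section is placed.  (The real code additionally
   lets sections shortly after the stub section reuse it when
   stubs_always_after_branch is not set.)  The helper name and the array
   representation are made up for illustration.  */

static void
group_sections_example (const bfd_vma *offset, const bfd_vma *size,
			unsigned int count, bfd_vma group_size,
			unsigned int *link_index)
{
  unsigned int head = 0;

  while (head < count)
    {
      unsigned int curr = head;
      bfd_vma group_start = offset[head];

      while (curr + 1 < count
	     && offset[curr + 1] + size[curr + 1] - group_start < group_size)
	curr++;

      /* Sections HEAD..CURR share the stub section placed after CURR.  */
      for (; head <= curr; head++)
	link_index[head] = curr;
    }
}
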
4426 /* Comparison function for sorting/searching relocations relating to the
4427 Cortex-A8 erratum fix. */
4428
4429 static int
4430 a8_reloc_compare (const void *a, const void *b)
4431 {
4432 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4433 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4434
4435 if (ra->from < rb->from)
4436 return -1;
4437 else if (ra->from > rb->from)
4438 return 1;
4439 else
4440 return 0;
4441 }
4442
4443 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4444 const char *, char **);
4445
4446 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4447 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4448 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4449 otherwise. */
4450
4451 static bfd_boolean
4452 cortex_a8_erratum_scan (bfd *input_bfd,
4453 struct bfd_link_info *info,
4454 struct a8_erratum_fix **a8_fixes_p,
4455 unsigned int *num_a8_fixes_p,
4456 unsigned int *a8_fix_table_size_p,
4457 struct a8_erratum_reloc *a8_relocs,
4458 unsigned int num_a8_relocs,
4459 unsigned prev_num_a8_fixes,
4460 bfd_boolean *stub_changed_p)
4461 {
4462 asection *section;
4463 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4464 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4465 unsigned int num_a8_fixes = *num_a8_fixes_p;
4466 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4467
4468 if (htab == NULL)
4469 return FALSE;
4470
4471 for (section = input_bfd->sections;
4472 section != NULL;
4473 section = section->next)
4474 {
4475 bfd_byte *contents = NULL;
4476 struct _arm_elf_section_data *sec_data;
4477 unsigned int span;
4478 bfd_vma base_vma;
4479
4480 if (elf_section_type (section) != SHT_PROGBITS
4481 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4482 || (section->flags & SEC_EXCLUDE) != 0
4483 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4484 || (section->output_section == bfd_abs_section_ptr))
4485 continue;
4486
4487 base_vma = section->output_section->vma + section->output_offset;
4488
4489 if (elf_section_data (section)->this_hdr.contents != NULL)
4490 contents = elf_section_data (section)->this_hdr.contents;
4491 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4492 return TRUE;
4493
4494 sec_data = elf32_arm_section_data (section);
4495
4496 for (span = 0; span < sec_data->mapcount; span++)
4497 {
4498 unsigned int span_start = sec_data->map[span].vma;
4499 unsigned int span_end = (span == sec_data->mapcount - 1)
4500 ? section->size : sec_data->map[span + 1].vma;
4501 unsigned int i;
4502 char span_type = sec_data->map[span].type;
4503 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4504
4505 if (span_type != 't')
4506 continue;
4507
4508 /* Span is entirely within a single 4KB region: skip scanning. */
4509 if (((base_vma + span_start) & ~0xfff)
4510 == ((base_vma + span_end) & ~0xfff))
4511 continue;
4512
4513 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4514
4515 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4516 * The branch target is in the same 4KB region as the
4517 first half of the branch.
4518 * The instruction before the branch is a 32-bit
4519 length non-branch instruction. */
4520 for (i = span_start; i < span_end;)
4521 {
4522 unsigned int insn = bfd_getl16 (&contents[i]);
4523 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4524 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4525
4526 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4527 insn_32bit = TRUE;
4528
4529 if (insn_32bit)
4530 {
4531 /* Load the rest of the insn (in manual-friendly order). */
4532 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4533
4534 /* Encoding T4: B<c>.W. */
4535 is_b = (insn & 0xf800d000) == 0xf0009000;
4536 /* Encoding T1: BL<c>.W. */
4537 is_bl = (insn & 0xf800d000) == 0xf000d000;
4538 /* Encoding T2: BLX<c>.W. */
4539 is_blx = (insn & 0xf800d000) == 0xf000c000;
4540 /* Encoding T3: B<c>.W (not permitted in IT block). */
4541 is_bcc = (insn & 0xf800d000) == 0xf0008000
4542 && (insn & 0x07f00000) != 0x03800000;
4543 }
4544
4545 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4546
4547 if (((base_vma + i) & 0xfff) == 0xffe
4548 && insn_32bit
4549 && is_32bit_branch
4550 && last_was_32bit
4551 && ! last_was_branch)
4552 {
4553 bfd_signed_vma offset = 0;
4554 bfd_boolean force_target_arm = FALSE;
4555 bfd_boolean force_target_thumb = FALSE;
4556 bfd_vma target;
4557 enum elf32_arm_stub_type stub_type = arm_stub_none;
4558 struct a8_erratum_reloc key, *found;
4559 bfd_boolean use_plt = FALSE;
4560
4561 key.from = base_vma + i;
4562 found = (struct a8_erratum_reloc *)
4563 bsearch (&key, a8_relocs, num_a8_relocs,
4564 sizeof (struct a8_erratum_reloc),
4565 &a8_reloc_compare);
4566
4567 if (found)
4568 {
4569 char *error_message = NULL;
4570 struct elf_link_hash_entry *entry;
4571
4572 /* We don't care about the error returned from this
4573 function, only whether there is glue or not. */
4574 entry = find_thumb_glue (info, found->sym_name,
4575 &error_message);
4576
4577 if (entry)
4578 found->non_a8_stub = TRUE;
4579
4580 /* Keep a simpler condition, for the sake of clarity. */
4581 if (htab->root.splt != NULL && found->hash != NULL
4582 && found->hash->root.plt.offset != (bfd_vma) -1)
4583 use_plt = TRUE;
4584
4585 if (found->r_type == R_ARM_THM_CALL)
4586 {
4587 if (found->branch_type == ST_BRANCH_TO_ARM
4588 || use_plt)
4589 force_target_arm = TRUE;
4590 else
4591 force_target_thumb = TRUE;
4592 }
4593 }
4594
4595 /* Check if we have an offending branch instruction. */
4596
4597 if (found && found->non_a8_stub)
4598 /* We've already made a stub for this instruction, e.g.
4599 it's a long branch or a Thumb->ARM stub. Assume that
4600 stub will suffice to work around the A8 erratum (see
4601 setting of always_after_branch above). */
4602 ;
4603 else if (is_bcc)
4604 {
4605 offset = (insn & 0x7ff) << 1;
4606 offset |= (insn & 0x3f0000) >> 4;
4607 offset |= (insn & 0x2000) ? 0x40000 : 0;
4608 offset |= (insn & 0x800) ? 0x80000 : 0;
4609 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4610 if (offset & 0x100000)
4611 offset |= ~ ((bfd_signed_vma) 0xfffff);
4612 stub_type = arm_stub_a8_veneer_b_cond;
4613 }
4614 else if (is_b || is_bl || is_blx)
4615 {
4616 int s = (insn & 0x4000000) != 0;
4617 int j1 = (insn & 0x2000) != 0;
4618 int j2 = (insn & 0x800) != 0;
4619 int i1 = !(j1 ^ s);
4620 int i2 = !(j2 ^ s);
4621
4622 offset = (insn & 0x7ff) << 1;
4623 offset |= (insn & 0x3ff0000) >> 4;
4624 offset |= i2 << 22;
4625 offset |= i1 << 23;
4626 offset |= s << 24;
4627 if (offset & 0x1000000)
4628 offset |= ~ ((bfd_signed_vma) 0xffffff);
4629
4630 if (is_blx)
4631 offset &= ~ ((bfd_signed_vma) 3);
4632
4633 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4634 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4635 }
4636
4637 if (stub_type != arm_stub_none)
4638 {
4639 bfd_vma pc_for_insn = base_vma + i + 4;
4640
4641 /* The original instruction is a BL, but the target is
4642 an ARM instruction. If we were not making a stub,
4643 the BL would have been converted to a BLX. Use the
4644 BLX stub instead in that case. */
4645 if (htab->use_blx && force_target_arm
4646 && stub_type == arm_stub_a8_veneer_bl)
4647 {
4648 stub_type = arm_stub_a8_veneer_blx;
4649 is_blx = TRUE;
4650 is_bl = FALSE;
4651 }
4652 /* Conversely, if the original instruction was
4653 BLX but the target is Thumb mode, use the BL
4654 stub. */
4655 else if (force_target_thumb
4656 && stub_type == arm_stub_a8_veneer_blx)
4657 {
4658 stub_type = arm_stub_a8_veneer_bl;
4659 is_blx = FALSE;
4660 is_bl = TRUE;
4661 }
4662
4663 if (is_blx)
4664 pc_for_insn &= ~ ((bfd_vma) 3);
4665
4666 /* If we found a relocation, use the proper destination,
4667 not the offset in the (unrelocated) instruction.
4668 Note this is always done if we switched the stub type
4669 above. */
4670 if (found)
4671 offset =
4672 (bfd_signed_vma) (found->destination - pc_for_insn);
4673
4674 /* If the stub will use a Thumb-mode branch to a
4675 PLT target, redirect it to the preceding Thumb
4676 entry point. */
4677 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4678 offset -= PLT_THUMB_STUB_SIZE;
4679
4680 target = pc_for_insn + offset;
4681
4682 /* The BLX stub is ARM-mode code. Adjust the offset to
4683 take the different PC value (+8 instead of +4) into
4684 account. */
4685 if (stub_type == arm_stub_a8_veneer_blx)
4686 offset += 4;
4687
4688 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4689 {
4690 char *stub_name = NULL;
4691
4692 if (num_a8_fixes == a8_fix_table_size)
4693 {
4694 a8_fix_table_size *= 2;
4695 a8_fixes = (struct a8_erratum_fix *)
4696 bfd_realloc (a8_fixes,
4697 sizeof (struct a8_erratum_fix)
4698 * a8_fix_table_size);
4699 }
4700
4701 if (num_a8_fixes < prev_num_a8_fixes)
4702 {
4703 /* If we're doing a subsequent scan,
4704 check if we've found the same fix as
4705 before, and try and reuse the stub
4706 name. */
4707 stub_name = a8_fixes[num_a8_fixes].stub_name;
4708 if ((a8_fixes[num_a8_fixes].section != section)
4709 || (a8_fixes[num_a8_fixes].offset != i))
4710 {
4711 free (stub_name);
4712 stub_name = NULL;
4713 *stub_changed_p = TRUE;
4714 }
4715 }
4716
4717 if (!stub_name)
4718 {
4719 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4720 if (stub_name != NULL)
4721 sprintf (stub_name, "%x:%x", section->id, i);
4722 }
4723
4724 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4725 a8_fixes[num_a8_fixes].section = section;
4726 a8_fixes[num_a8_fixes].offset = i;
4727 a8_fixes[num_a8_fixes].addend = offset;
4728 a8_fixes[num_a8_fixes].orig_insn = insn;
4729 a8_fixes[num_a8_fixes].stub_name = stub_name;
4730 a8_fixes[num_a8_fixes].stub_type = stub_type;
4731 a8_fixes[num_a8_fixes].branch_type =
4732 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4733
4734 num_a8_fixes++;
4735 }
4736 }
4737 }
4738
4739 i += insn_32bit ? 4 : 2;
4740 last_was_32bit = insn_32bit;
4741 last_was_branch = is_32bit_branch;
4742 }
4743 }
4744
4745 if (elf_section_data (section)->this_hdr.contents == NULL)
4746 free (contents);
4747 }
4748
4749 *a8_fixes_p = a8_fixes;
4750 *num_a8_fixes_p = num_a8_fixes;
4751 *a8_fix_table_size_p = a8_fix_table_size;
4752
4753 return FALSE;
4754 }
4755
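/* Illustrative sketch only, not referenced by the build: the branch-offset
   decoding performed by the scanner above for 32-bit Thumb B.W/BL/BLX
   encodings, with the two halfwords packed first-halfword-high as in the
   scanner.  The result is the signed offset from the PC base used above
   (the address of the instruction plus 4); for BLX the low two bits are
   cleared, and the scanner also word-aligns the PC base in that case.
   The helper name is hypothetical.  */

static bfd_signed_vma
thumb2_bl_offset_example (unsigned int insn, bfd_boolean is_blx)
{
  int s = (insn & 0x4000000) != 0;
  int j1 = (insn & 0x2000) != 0;
  int j2 = (insn & 0x800) != 0;
  int i1 = !(j1 ^ s);
  int i2 = !(j2 ^ s);
  bfd_signed_vma offset;

  offset = (insn & 0x7ff) << 1;
  offset |= (insn & 0x3ff0000) >> 4;
  offset |= i2 << 22;
  offset |= i1 << 23;
  offset |= s << 24;
  if (offset & 0x1000000)
    offset |= ~ ((bfd_signed_vma) 0xffffff);

  if (is_blx)
    offset &= ~ ((bfd_signed_vma) 3);

  return offset;
}
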
4756 /* Determine and set the size of the stub section for a final link.
4757
4758 The basic idea here is to examine all the relocations looking for
4759 PC-relative calls to a target that is unreachable with a "bl"
4760 instruction. */
4761
4762 bfd_boolean
4763 elf32_arm_size_stubs (bfd *output_bfd,
4764 bfd *stub_bfd,
4765 struct bfd_link_info *info,
4766 bfd_signed_vma group_size,
4767 asection * (*add_stub_section) (const char *, asection *),
4768 void (*layout_sections_again) (void))
4769 {
4770 bfd_size_type stub_group_size;
4771 bfd_boolean stubs_always_after_branch;
4772 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4773 struct a8_erratum_fix *a8_fixes = NULL;
4774 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4775 struct a8_erratum_reloc *a8_relocs = NULL;
4776 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4777
4778 if (htab == NULL)
4779 return FALSE;
4780
4781 if (htab->fix_cortex_a8)
4782 {
4783 a8_fixes = (struct a8_erratum_fix *)
4784 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4785 a8_relocs = (struct a8_erratum_reloc *)
4786 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4787 }
4788
4789 /* Propagate mach to stub bfd, because it may not have been
4790 finalized when we created stub_bfd. */
4791 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4792 bfd_get_mach (output_bfd));
4793
4794 /* Stash our params away. */
4795 htab->stub_bfd = stub_bfd;
4796 htab->add_stub_section = add_stub_section;
4797 htab->layout_sections_again = layout_sections_again;
4798 stubs_always_after_branch = group_size < 0;
4799
4800 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4801 as the first half of a 32-bit branch straddling two 4K pages. This is a
4802 crude way of enforcing that. */
4803 if (htab->fix_cortex_a8)
4804 stubs_always_after_branch = 1;
4805
4806 if (group_size < 0)
4807 stub_group_size = -group_size;
4808 else
4809 stub_group_size = group_size;
4810
4811 if (stub_group_size == 1)
4812 {
4813 /* Default values. */
4814 /* The Thumb branch range of +-4MB has to be used as the default
4815 maximum size (a given section can contain both ARM and Thumb
4816 code, so the worst case has to be taken into account).
4817
4818 This value is 24K less than that, which allows for 2025
4819 12-byte stubs. If we exceed that, then we will fail to link.
4820 The user will have to relink with an explicit group size
4821 option. */
4822 stub_group_size = 4170000;
4823 }
4824
4825 group_sections (htab, stub_group_size, stubs_always_after_branch);
4826
4827 /* If we're applying the cortex A8 fix, we need to determine the
4828 program header size now, because we cannot change it later --
4829 that could alter section placements. Notice the A8 erratum fix
4830 ends up requiring the section addresses to remain unchanged
4831 modulo the page size. That's something we cannot represent
4832 inside BFD, and we don't want to force the section alignment to
4833 be the page size. */
4834 if (htab->fix_cortex_a8)
4835 (*htab->layout_sections_again) ();
4836
4837 while (1)
4838 {
4839 bfd *input_bfd;
4840 unsigned int bfd_indx;
4841 asection *stub_sec;
4842 bfd_boolean stub_changed = FALSE;
4843 unsigned prev_num_a8_fixes = num_a8_fixes;
4844
4845 num_a8_fixes = 0;
4846 for (input_bfd = info->input_bfds, bfd_indx = 0;
4847 input_bfd != NULL;
4848 input_bfd = input_bfd->link_next, bfd_indx++)
4849 {
4850 Elf_Internal_Shdr *symtab_hdr;
4851 asection *section;
4852 Elf_Internal_Sym *local_syms = NULL;
4853
4854 num_a8_relocs = 0;
4855
4856 /* We'll need the symbol table in a second. */
4857 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4858 if (symtab_hdr->sh_info == 0)
4859 continue;
4860
4861 /* Walk over each section attached to the input bfd. */
4862 for (section = input_bfd->sections;
4863 section != NULL;
4864 section = section->next)
4865 {
4866 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4867
4868 /* If there aren't any relocs, then there's nothing more
4869 to do. */
4870 if ((section->flags & SEC_RELOC) == 0
4871 || section->reloc_count == 0
4872 || (section->flags & SEC_CODE) == 0)
4873 continue;
4874
4875 /* If this section is a link-once section that will be
4876 discarded, then don't create any stubs. */
4877 if (section->output_section == NULL
4878 || section->output_section->owner != output_bfd)
4879 continue;
4880
4881 /* Get the relocs. */
4882 internal_relocs
4883 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4884 NULL, info->keep_memory);
4885 if (internal_relocs == NULL)
4886 goto error_ret_free_local;
4887
4888 /* Now examine each relocation. */
4889 irela = internal_relocs;
4890 irelaend = irela + section->reloc_count;
4891 for (; irela < irelaend; irela++)
4892 {
4893 unsigned int r_type, r_indx;
4894 enum elf32_arm_stub_type stub_type;
4895 struct elf32_arm_stub_hash_entry *stub_entry;
4896 asection *sym_sec;
4897 bfd_vma sym_value;
4898 bfd_vma destination;
4899 struct elf32_arm_link_hash_entry *hash;
4900 const char *sym_name;
4901 char *stub_name;
4902 const asection *id_sec;
4903 unsigned char st_type;
4904 enum arm_st_branch_type branch_type;
4905 bfd_boolean created_stub = FALSE;
4906
4907 r_type = ELF32_R_TYPE (irela->r_info);
4908 r_indx = ELF32_R_SYM (irela->r_info);
4909
4910 if (r_type >= (unsigned int) R_ARM_max)
4911 {
4912 bfd_set_error (bfd_error_bad_value);
4913 error_ret_free_internal:
4914 if (elf_section_data (section)->relocs == NULL)
4915 free (internal_relocs);
4916 goto error_ret_free_local;
4917 }
4918
4919 hash = NULL;
4920 if (r_indx >= symtab_hdr->sh_info)
4921 hash = elf32_arm_hash_entry
4922 (elf_sym_hashes (input_bfd)
4923 [r_indx - symtab_hdr->sh_info]);
4924
4925 /* Only look for stubs on branch instructions, or
4926 non-relaxed TLS calls. */
4927 if ((r_type != (unsigned int) R_ARM_CALL)
4928 && (r_type != (unsigned int) R_ARM_THM_CALL)
4929 && (r_type != (unsigned int) R_ARM_JUMP24)
4930 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4931 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4932 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4933 && (r_type != (unsigned int) R_ARM_PLT32)
4934 && !((r_type == (unsigned int) R_ARM_TLS_CALL
4935 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
4936 && r_type == elf32_arm_tls_transition
4937 (info, r_type, &hash->root)
4938 && ((hash ? hash->tls_type
4939 : (elf32_arm_local_got_tls_type
4940 (input_bfd)[r_indx]))
4941 & GOT_TLS_GDESC) != 0))
4942 continue;
4943
4944 /* Now determine the call target, its name, value,
4945 section. */
4946 sym_sec = NULL;
4947 sym_value = 0;
4948 destination = 0;
4949 sym_name = NULL;
4950
4951 if (r_type == (unsigned int) R_ARM_TLS_CALL
4952 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
4953 {
4954 /* A non-relaxed TLS call. The target is the
4955 PLT-resident trampoline and has nothing to do
4956 with the symbol. */
4957 BFD_ASSERT (htab->tls_trampoline > 0);
4958 sym_sec = htab->root.splt;
4959 sym_value = htab->tls_trampoline;
4960 hash = 0;
4961 st_type = STT_FUNC;
4962 branch_type = ST_BRANCH_TO_ARM;
4963 }
4964 else if (!hash)
4965 {
4966 /* It's a local symbol. */
4967 Elf_Internal_Sym *sym;
4968
4969 if (local_syms == NULL)
4970 {
4971 local_syms
4972 = (Elf_Internal_Sym *) symtab_hdr->contents;
4973 if (local_syms == NULL)
4974 local_syms
4975 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4976 symtab_hdr->sh_info, 0,
4977 NULL, NULL, NULL);
4978 if (local_syms == NULL)
4979 goto error_ret_free_internal;
4980 }
4981
4982 sym = local_syms + r_indx;
4983 if (sym->st_shndx == SHN_UNDEF)
4984 sym_sec = bfd_und_section_ptr;
4985 else if (sym->st_shndx == SHN_ABS)
4986 sym_sec = bfd_abs_section_ptr;
4987 else if (sym->st_shndx == SHN_COMMON)
4988 sym_sec = bfd_com_section_ptr;
4989 else
4990 sym_sec =
4991 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
4992
4993 if (!sym_sec)
4994 /* This is an undefined symbol. It can never
4995 be resolved. */
4996 continue;
4997
4998 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4999 sym_value = sym->st_value;
5000 destination = (sym_value + irela->r_addend
5001 + sym_sec->output_offset
5002 + sym_sec->output_section->vma);
5003 st_type = ELF_ST_TYPE (sym->st_info);
5004 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5005 sym_name
5006 = bfd_elf_string_from_elf_section (input_bfd,
5007 symtab_hdr->sh_link,
5008 sym->st_name);
5009 }
5010 else
5011 {
5012 /* It's an external symbol. */
5013 while (hash->root.root.type == bfd_link_hash_indirect
5014 || hash->root.root.type == bfd_link_hash_warning)
5015 hash = ((struct elf32_arm_link_hash_entry *)
5016 hash->root.root.u.i.link);
5017
5018 if (hash->root.root.type == bfd_link_hash_defined
5019 || hash->root.root.type == bfd_link_hash_defweak)
5020 {
5021 struct elf32_arm_link_hash_table *globals =
5022 elf32_arm_hash_table (info);
5023
5024 sym_sec = hash->root.root.u.def.section;
5025 sym_value = hash->root.root.u.def.value;
5026
5027 /* For a destination in a shared library,
5028 use the PLT stub as target address to
5029 decide whether a branch stub is
5030 needed. */
5031 if (globals != NULL
5032 && globals->root.splt != NULL
5033 && hash != NULL
5034 && hash->root.plt.offset != (bfd_vma) -1)
5035 {
5036 sym_sec = globals->root.splt;
5037 sym_value = hash->root.plt.offset;
5038 if (sym_sec->output_section != NULL)
5039 destination = (sym_value
5040 + sym_sec->output_offset
5041 + sym_sec->output_section->vma);
5042 }
5043 else if (sym_sec->output_section != NULL)
5044 destination = (sym_value + irela->r_addend
5045 + sym_sec->output_offset
5046 + sym_sec->output_section->vma);
5047 }
5048 else if ((hash->root.root.type == bfd_link_hash_undefined)
5049 || (hash->root.root.type == bfd_link_hash_undefweak))
5050 {
5051 /* For a shared library, use the PLT stub as
5052 target address to decide whether a long
5053 branch stub is needed.
5054 For absolute code, such symbols cannot be handled. */
5055 struct elf32_arm_link_hash_table *globals =
5056 elf32_arm_hash_table (info);
5057
5058 if (globals != NULL
5059 && globals->root.splt != NULL
5060 && hash != NULL
5061 && hash->root.plt.offset != (bfd_vma) -1)
5062 {
5063 sym_sec = globals->root.splt;
5064 sym_value = hash->root.plt.offset;
5065 if (sym_sec->output_section != NULL)
5066 destination = (sym_value
5067 + sym_sec->output_offset
5068 + sym_sec->output_section->vma);
5069 }
5070 else
5071 continue;
5072 }
5073 else
5074 {
5075 bfd_set_error (bfd_error_bad_value);
5076 goto error_ret_free_internal;
5077 }
5078 st_type = hash->root.type;
5079 branch_type = hash->root.target_internal;
5080 sym_name = hash->root.root.root.string;
5081 }
5082
5083 do
5084 {
5085 /* Determine what (if any) linker stub is needed. */
5086 stub_type = arm_type_of_stub (info, section, irela,
5087 st_type, &branch_type,
5088 hash, destination, sym_sec,
5089 input_bfd, sym_name);
5090 if (stub_type == arm_stub_none)
5091 break;
5092
5093 /* Support for grouping stub sections. */
5094 id_sec = htab->stub_group[section->id].link_sec;
5095
5096 /* Get the name of this stub. */
5097 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5098 irela, stub_type);
5099 if (!stub_name)
5100 goto error_ret_free_internal;
5101
5102 /* We've either created a stub for this reloc already,
5103 or we are about to. */
5104 created_stub = TRUE;
5105
5106 stub_entry = arm_stub_hash_lookup
5107 (&htab->stub_hash_table, stub_name,
5108 FALSE, FALSE);
5109 if (stub_entry != NULL)
5110 {
5111 /* The proper stub has already been created. */
5112 free (stub_name);
5113 stub_entry->target_value = sym_value;
5114 break;
5115 }
5116
5117 stub_entry = elf32_arm_add_stub (stub_name, section,
5118 htab);
5119 if (stub_entry == NULL)
5120 {
5121 free (stub_name);
5122 goto error_ret_free_internal;
5123 }
5124
5125 stub_entry->target_value = sym_value;
5126 stub_entry->target_section = sym_sec;
5127 stub_entry->stub_type = stub_type;
5128 stub_entry->h = hash;
5129 stub_entry->branch_type = branch_type;
5130
5131 if (sym_name == NULL)
5132 sym_name = "unnamed";
5133 stub_entry->output_name = (char *)
5134 bfd_alloc (htab->stub_bfd,
5135 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5136 + strlen (sym_name));
5137 if (stub_entry->output_name == NULL)
5138 {
5139 free (stub_name);
5140 goto error_ret_free_internal;
5141 }
5142
5143 /* For historical reasons, use the existing names for
5144 ARM-to-Thumb and Thumb-to-ARM stubs. */
5145 if ((r_type == (unsigned int) R_ARM_THM_CALL
5146 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5147 && branch_type == ST_BRANCH_TO_ARM)
5148 sprintf (stub_entry->output_name,
5149 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5150 else if ((r_type == (unsigned int) R_ARM_CALL
5151 || r_type == (unsigned int) R_ARM_JUMP24)
5152 && branch_type == ST_BRANCH_TO_THUMB)
5153 sprintf (stub_entry->output_name,
5154 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5155 else
5156 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5157 sym_name);
5158
5159 stub_changed = TRUE;
5160 }
5161 while (0);
5162
5163 /* Look for relocations which might trigger Cortex-A8
5164 erratum. */
5165 if (htab->fix_cortex_a8
5166 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5167 || r_type == (unsigned int) R_ARM_THM_JUMP19
5168 || r_type == (unsigned int) R_ARM_THM_CALL
5169 || r_type == (unsigned int) R_ARM_THM_XPC22))
5170 {
5171 bfd_vma from = section->output_section->vma
5172 + section->output_offset
5173 + irela->r_offset;
5174
5175 if ((from & 0xfff) == 0xffe)
5176 {
5177 /* Found a candidate. Note we haven't checked that the
5178 destination is within 4K here: if we did (and
5179 didn't create an entry in a8_relocs) we couldn't tell
5180 that a branch should have been relocated when
5181 scanning later. */
5182 if (num_a8_relocs == a8_reloc_table_size)
5183 {
5184 a8_reloc_table_size *= 2;
5185 a8_relocs = (struct a8_erratum_reloc *)
5186 bfd_realloc (a8_relocs,
5187 sizeof (struct a8_erratum_reloc)
5188 * a8_reloc_table_size);
5189 }
5190
5191 a8_relocs[num_a8_relocs].from = from;
5192 a8_relocs[num_a8_relocs].destination = destination;
5193 a8_relocs[num_a8_relocs].r_type = r_type;
5194 a8_relocs[num_a8_relocs].branch_type = branch_type;
5195 a8_relocs[num_a8_relocs].sym_name = sym_name;
5196 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5197 a8_relocs[num_a8_relocs].hash = hash;
5198
5199 num_a8_relocs++;
5200 }
5201 }
5202 }
5203
5204 /* We're done with the internal relocs, free them. */
5205 if (elf_section_data (section)->relocs == NULL)
5206 free (internal_relocs);
5207 }
5208
5209 if (htab->fix_cortex_a8)
5210 {
5211 /* Sort relocs which might apply to Cortex-A8 erratum. */
5212 qsort (a8_relocs, num_a8_relocs,
5213 sizeof (struct a8_erratum_reloc),
5214 &a8_reloc_compare);
5215
5216 /* Scan for branches which might trigger Cortex-A8 erratum. */
5217 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5218 &num_a8_fixes, &a8_fix_table_size,
5219 a8_relocs, num_a8_relocs,
5220 prev_num_a8_fixes, &stub_changed)
5221 != 0)
5222 goto error_ret_free_local;
5223 }
5224 }
5225
5226 if (prev_num_a8_fixes != num_a8_fixes)
5227 stub_changed = TRUE;
5228
5229 if (!stub_changed)
5230 break;
5231
5232 /* OK, we've added some stubs. Find out the new size of the
5233 stub sections. */
5234 for (stub_sec = htab->stub_bfd->sections;
5235 stub_sec != NULL;
5236 stub_sec = stub_sec->next)
5237 {
5238 /* Ignore non-stub sections. */
5239 if (!strstr (stub_sec->name, STUB_SUFFIX))
5240 continue;
5241
5242 stub_sec->size = 0;
5243 }
5244
5245 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5246
5247 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5248 if (htab->fix_cortex_a8)
5249 for (i = 0; i < num_a8_fixes; i++)
5250 {
5251 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5252 a8_fixes[i].section, htab);
5253
5254 if (stub_sec == NULL)
5255 goto error_ret_free_local;
5256
5257 stub_sec->size
5258 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5259 NULL);
5260 }
5261
5262
5263 /* Ask the linker to do its stuff. */
5264 (*htab->layout_sections_again) ();
5265 }
5266
5267 /* Add stubs for Cortex-A8 erratum fixes now. */
5268 if (htab->fix_cortex_a8)
5269 {
5270 for (i = 0; i < num_a8_fixes; i++)
5271 {
5272 struct elf32_arm_stub_hash_entry *stub_entry;
5273 char *stub_name = a8_fixes[i].stub_name;
5274 asection *section = a8_fixes[i].section;
5275 unsigned int section_id = a8_fixes[i].section->id;
5276 asection *link_sec = htab->stub_group[section_id].link_sec;
5277 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5278 const insn_sequence *template_sequence;
5279 int template_size, size = 0;
5280
5281 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5282 TRUE, FALSE);
5283 if (stub_entry == NULL)
5284 {
5285 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5286 section->owner,
5287 stub_name);
5288 return FALSE;
5289 }
5290
5291 stub_entry->stub_sec = stub_sec;
5292 stub_entry->stub_offset = 0;
5293 stub_entry->id_sec = link_sec;
5294 stub_entry->stub_type = a8_fixes[i].stub_type;
5295 stub_entry->target_section = a8_fixes[i].section;
5296 stub_entry->target_value = a8_fixes[i].offset;
5297 stub_entry->target_addend = a8_fixes[i].addend;
5298 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5299 stub_entry->branch_type = a8_fixes[i].branch_type;
5300
5301 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5302 &template_sequence,
5303 &template_size);
5304
5305 stub_entry->stub_size = size;
5306 stub_entry->stub_template = template_sequence;
5307 stub_entry->stub_template_size = template_size;
5308 }
5309
5310 /* Stash the Cortex-A8 erratum fix array for use later in
5311 elf32_arm_write_section(). */
5312 htab->a8_erratum_fixes = a8_fixes;
5313 htab->num_a8_erratum_fixes = num_a8_fixes;
5314 }
5315 else
5316 {
5317 htab->a8_erratum_fixes = NULL;
5318 htab->num_a8_erratum_fixes = 0;
5319 }
5320 return TRUE;
5321
5322 error_ret_free_local:
5323 return FALSE;
5324 }
5325
5326 /* Build all the stubs associated with the current output file. The
5327 stubs are kept in a hash table attached to the main linker hash
5328 table. We also set up the .plt entries for statically linked PIC
5329 functions here. This function is called via arm_elf_finish in the
5330 linker. */
5331
5332 bfd_boolean
5333 elf32_arm_build_stubs (struct bfd_link_info *info)
5334 {
5335 asection *stub_sec;
5336 struct bfd_hash_table *table;
5337 struct elf32_arm_link_hash_table *htab;
5338
5339 htab = elf32_arm_hash_table (info);
5340 if (htab == NULL)
5341 return FALSE;
5342
5343 for (stub_sec = htab->stub_bfd->sections;
5344 stub_sec != NULL;
5345 stub_sec = stub_sec->next)
5346 {
5347 bfd_size_type size;
5348
5349 /* Ignore non-stub sections. */
5350 if (!strstr (stub_sec->name, STUB_SUFFIX))
5351 continue;
5352
5353 /* Allocate memory to hold the linker stubs. */
5354 size = stub_sec->size;
5355 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5356 if (stub_sec->contents == NULL && size != 0)
5357 return FALSE;
5358 stub_sec->size = 0;
5359 }
5360
5361 /* Build the stubs as directed by the stub hash table. */
5362 table = &htab->stub_hash_table;
5363 bfd_hash_traverse (table, arm_build_one_stub, info);
5364 if (htab->fix_cortex_a8)
5365 {
5366       /* Place the Cortex-A8 erratum stubs last.  */
5367 htab->fix_cortex_a8 = -1;
5368 bfd_hash_traverse (table, arm_build_one_stub, info);
5369 }
5370
5371 return TRUE;
5372 }
5373
5374 /* Locate the Thumb encoded calling stub for NAME. */
5375
5376 static struct elf_link_hash_entry *
5377 find_thumb_glue (struct bfd_link_info *link_info,
5378 const char *name,
5379 char **error_message)
5380 {
5381 char *tmp_name;
5382 struct elf_link_hash_entry *hash;
5383 struct elf32_arm_link_hash_table *hash_table;
5384
5385 /* We need a pointer to the armelf specific hash table. */
5386 hash_table = elf32_arm_hash_table (link_info);
5387 if (hash_table == NULL)
5388 return NULL;
5389
5390 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5391 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5392
5393 BFD_ASSERT (tmp_name);
5394
5395 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5396
5397 hash = elf_link_hash_lookup
5398 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5399
5400 if (hash == NULL
5401 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5402 tmp_name, name) == -1)
5403 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5404
5405 free (tmp_name);
5406
5407 return hash;
5408 }
5409
5410 /* Locate the ARM encoded calling stub for NAME. */
5411
5412 static struct elf_link_hash_entry *
5413 find_arm_glue (struct bfd_link_info *link_info,
5414 const char *name,
5415 char **error_message)
5416 {
5417 char *tmp_name;
5418 struct elf_link_hash_entry *myh;
5419 struct elf32_arm_link_hash_table *hash_table;
5420
5421 /* We need a pointer to the elfarm specific hash table. */
5422 hash_table = elf32_arm_hash_table (link_info);
5423 if (hash_table == NULL)
5424 return NULL;
5425
5426 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5427 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5428
5429 BFD_ASSERT (tmp_name);
5430
5431 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5432
5433 myh = elf_link_hash_lookup
5434 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5435
5436 if (myh == NULL
5437 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5438 tmp_name, name) == -1)
5439 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5440
5441 free (tmp_name);
5442
5443 return myh;
5444 }
5445
5446 /* ARM->Thumb glue (static images):
5447
5448 .arm
5449 __func_from_arm:
5450 ldr r12, __func_addr
5451 bx r12
5452 __func_addr:
5453      .word func    @ behave as if you saw an ARM_32 reloc.
5454
5455 (v5t static images)
5456 .arm
5457 __func_from_arm:
5458 ldr pc, __func_addr
5459 __func_addr:
5460      .word func    @ behave as if you saw an ARM_32 reloc.
5461
5462 (relocatable images)
5463 .arm
5464 __func_from_arm:
5465 ldr r12, __func_offset
5466 add r12, r12, pc
5467 bx r12
5468 __func_offset:
5469 .word func - . */
5470
5471 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5472 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5473 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5474 static const insn32 a2t3_func_addr_insn = 0x00000001;
5475
5476 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5477 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5478 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5479
5480 #define ARM2THUMB_PIC_GLUE_SIZE 16
5481 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5482 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5483 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5484
5485 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5486
5487 .thumb .thumb
5488 .align 2 .align 2
5489 __func_from_thumb: __func_from_thumb:
5490 bx pc push {r6, lr}
5491 nop ldr r6, __func_addr
5492 .arm mov lr, pc
5493 b func bx r6
5494 .arm
5495 ;; back_to_thumb
5496 ldmia r13! {r6, lr}
5497 bx lr
5498 __func_addr:
5499 .word func */
5500
5501 #define THUMB2ARM_GLUE_SIZE 8
5502 static const insn16 t2a1_bx_pc_insn = 0x4778;
5503 static const insn16 t2a2_noop_insn = 0x46c0;
5504 static const insn32 t2a3_b_insn = 0xea000000;
5505
5506 #define VFP11_ERRATUM_VENEER_SIZE 8
5507
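/* ARMv4 BX veneer, one per register; a sketch of what the three insn
   constants below encode (the veneer itself is written later by
   elf32_arm_bx_glue):

       tst   rN, #1
       moveq pc, rN
       bx    rN  */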
5508 #define ARM_BX_VENEER_SIZE 12
5509 static const insn32 armbx1_tst_insn = 0xe3100001;
5510 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5511 static const insn32 armbx3_bx_insn = 0xe12fff10;
5512
5513 #ifndef ELFARM_NABI_C_INCLUDED
5514 static void
5515 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5516 {
5517 asection * s;
5518 bfd_byte * contents;
5519
5520 if (size == 0)
5521 {
5522 /* Do not include empty glue sections in the output. */
5523 if (abfd != NULL)
5524 {
5525 s = bfd_get_section_by_name (abfd, name);
5526 if (s != NULL)
5527 s->flags |= SEC_EXCLUDE;
5528 }
5529 return;
5530 }
5531
5532 BFD_ASSERT (abfd != NULL);
5533
5534 s = bfd_get_section_by_name (abfd, name);
5535 BFD_ASSERT (s != NULL);
5536
5537 contents = (bfd_byte *) bfd_alloc (abfd, size);
5538
5539 BFD_ASSERT (s->size == size);
5540 s->contents = contents;
5541 }
5542
5543 bfd_boolean
5544 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5545 {
5546 struct elf32_arm_link_hash_table * globals;
5547
5548 globals = elf32_arm_hash_table (info);
5549 BFD_ASSERT (globals != NULL);
5550
5551 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5552 globals->arm_glue_size,
5553 ARM2THUMB_GLUE_SECTION_NAME);
5554
5555 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5556 globals->thumb_glue_size,
5557 THUMB2ARM_GLUE_SECTION_NAME);
5558
5559 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5560 globals->vfp11_erratum_glue_size,
5561 VFP11_ERRATUM_VENEER_SECTION_NAME);
5562
5563 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5564 globals->bx_glue_size,
5565 ARM_BX_GLUE_SECTION_NAME);
5566
5567 return TRUE;
5568 }
5569
5570 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5571    Returns the symbol identifying the stub.  */
5572
5573 static struct elf_link_hash_entry *
5574 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5575 struct elf_link_hash_entry * h)
5576 {
5577 const char * name = h->root.root.string;
5578 asection * s;
5579 char * tmp_name;
5580 struct elf_link_hash_entry * myh;
5581 struct bfd_link_hash_entry * bh;
5582 struct elf32_arm_link_hash_table * globals;
5583 bfd_vma val;
5584 bfd_size_type size;
5585
5586 globals = elf32_arm_hash_table (link_info);
5587 BFD_ASSERT (globals != NULL);
5588 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5589
5590 s = bfd_get_section_by_name
5591 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5592
5593 BFD_ASSERT (s != NULL);
5594
5595 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5596 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5597
5598 BFD_ASSERT (tmp_name);
5599
5600 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5601
5602 myh = elf_link_hash_lookup
5603 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5604
5605 if (myh != NULL)
5606 {
5607 /* We've already seen this guy. */
5608 free (tmp_name);
5609 return myh;
5610 }
5611
5612   /* The only trick here is using globals->arm_glue_size as the value.
5613 Even though the section isn't allocated yet, this is where we will be
5614 putting it. The +1 on the value marks that the stub has not been
5615 output yet - not that it is a Thumb function. */
5616 bh = NULL;
5617 val = globals->arm_glue_size + 1;
5618 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5619 tmp_name, BSF_GLOBAL, s, val,
5620 NULL, TRUE, FALSE, &bh);
5621
5622 myh = (struct elf_link_hash_entry *) bh;
5623 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5624 myh->forced_local = 1;
5625
5626 free (tmp_name);
5627
5628 if (link_info->shared || globals->root.is_relocatable_executable
5629 || globals->pic_veneer)
5630 size = ARM2THUMB_PIC_GLUE_SIZE;
5631 else if (globals->use_blx)
5632 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5633 else
5634 size = ARM2THUMB_STATIC_GLUE_SIZE;
5635
5636 s->size += size;
5637 globals->arm_glue_size += size;
5638
5639 return myh;
5640 }
5641
5642 /* Allocate space for ARMv4 BX veneers. */
5643
5644 static void
5645 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5646 {
5647 asection * s;
5648 struct elf32_arm_link_hash_table *globals;
5649 char *tmp_name;
5650 struct elf_link_hash_entry *myh;
5651 struct bfd_link_hash_entry *bh;
5652 bfd_vma val;
5653
5654 /* BX PC does not need a veneer. */
5655 if (reg == 15)
5656 return;
5657
5658 globals = elf32_arm_hash_table (link_info);
5659 BFD_ASSERT (globals != NULL);
5660 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5661
5662 /* Check if this veneer has already been allocated. */
5663 if (globals->bx_glue_offset[reg])
5664 return;
5665
5666 s = bfd_get_section_by_name
5667 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5668
5669 BFD_ASSERT (s != NULL);
5670
5671 /* Add symbol for veneer. */
5672 tmp_name = (char *)
5673 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5674
5675 BFD_ASSERT (tmp_name);
5676
5677 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5678
5679 myh = elf_link_hash_lookup
5680 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5681
5682 BFD_ASSERT (myh == NULL);
5683
5684 bh = NULL;
5685 val = globals->bx_glue_size;
5686 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5687 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5688 NULL, TRUE, FALSE, &bh);
5689
5690 myh = (struct elf_link_hash_entry *) bh;
5691 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5692 myh->forced_local = 1;
5693
5694 s->size += ARM_BX_VENEER_SIZE;
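  /* The low bits of bx_glue_offset hold state rather than address bits:
     bit 1 marks the veneer as allocated here, and bit 0 (set later in
     elf32_arm_bx_glue) marks its contents as written.  The veneer's real
     offset is the value with these two bits masked off.  */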
5695 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5696 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5697 }
5698
5699
5700 /* Add an entry to the code/data map for section SEC. */
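/* TYPE is the mapping symbol class: 'a' for ARM code, 't' for Thumb code
   and 'd' for data, following the ARM ELF mapping symbols $a, $t and $d.  */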
5701
5702 static void
5703 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5704 {
5705 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5706 unsigned int newidx;
5707
5708 if (sec_data->map == NULL)
5709 {
5710 sec_data->map = (elf32_arm_section_map *)
5711 bfd_malloc (sizeof (elf32_arm_section_map));
5712 sec_data->mapcount = 0;
5713 sec_data->mapsize = 1;
5714 }
5715
5716 newidx = sec_data->mapcount++;
5717
5718 if (sec_data->mapcount > sec_data->mapsize)
5719 {
5720 sec_data->mapsize *= 2;
5721 sec_data->map = (elf32_arm_section_map *)
5722 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5723 * sizeof (elf32_arm_section_map));
5724 }
5725
5726 if (sec_data->map)
5727 {
5728 sec_data->map[newidx].vma = vma;
5729 sec_data->map[newidx].type = type;
5730 }
5731 }
5732
5733
5734 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5735 veneers are handled for now. */
5736
5737 static bfd_vma
5738 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5739 elf32_vfp11_erratum_list *branch,
5740 bfd *branch_bfd,
5741 asection *branch_sec,
5742 unsigned int offset)
5743 {
5744 asection *s;
5745 struct elf32_arm_link_hash_table *hash_table;
5746 char *tmp_name;
5747 struct elf_link_hash_entry *myh;
5748 struct bfd_link_hash_entry *bh;
5749 bfd_vma val;
5750 struct _arm_elf_section_data *sec_data;
5751 elf32_vfp11_erratum_list *newerr;
5752
5753 hash_table = elf32_arm_hash_table (link_info);
5754 BFD_ASSERT (hash_table != NULL);
5755 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5756
5757 s = bfd_get_section_by_name
5758 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5759
5760   BFD_ASSERT (s != NULL);
5761 
5762   sec_data = elf32_arm_section_data (s);
5763
5764 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5765 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5766
5767 BFD_ASSERT (tmp_name);
5768
5769 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5770 hash_table->num_vfp11_fixes);
5771
5772 myh = elf_link_hash_lookup
5773 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5774
5775 BFD_ASSERT (myh == NULL);
5776
5777 bh = NULL;
5778 val = hash_table->vfp11_erratum_glue_size;
5779 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5780 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5781 NULL, TRUE, FALSE, &bh);
5782
5783 myh = (struct elf_link_hash_entry *) bh;
5784 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5785 myh->forced_local = 1;
5786
5787 /* Link veneer back to calling location. */
5788 sec_data->erratumcount += 1;
5789 newerr = (elf32_vfp11_erratum_list *)
5790 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5791
5792 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5793 newerr->vma = -1;
5794 newerr->u.v.branch = branch;
5795 newerr->u.v.id = hash_table->num_vfp11_fixes;
5796 branch->u.b.veneer = newerr;
5797
5798 newerr->next = sec_data->erratumlist;
5799 sec_data->erratumlist = newerr;
5800
5801 /* A symbol for the return from the veneer. */
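  /* It marks where the veneer returns to: the instruction following the
     erratum-triggering one, hence OFFSET + 4 in ARM mode.  */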
5802 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5803 hash_table->num_vfp11_fixes);
5804
5805 myh = elf_link_hash_lookup
5806 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5807
5808 if (myh != NULL)
5809 abort ();
5810
5811 bh = NULL;
5812 val = offset + 4;
5813 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5814 branch_sec, val, NULL, TRUE, FALSE, &bh);
5815
5816 myh = (struct elf_link_hash_entry *) bh;
5817 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5818 myh->forced_local = 1;
5819
5820 free (tmp_name);
5821
5822 /* Generate a mapping symbol for the veneer section, and explicitly add an
5823 entry for that symbol to the code/data map for the section. */
5824 if (hash_table->vfp11_erratum_glue_size == 0)
5825 {
5826 bh = NULL;
5827 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5828 ever requires this erratum fix. */
5829 _bfd_generic_link_add_one_symbol (link_info,
5830 hash_table->bfd_of_glue_owner, "$a",
5831 BSF_LOCAL, s, 0, NULL,
5832 TRUE, FALSE, &bh);
5833
5834 myh = (struct elf_link_hash_entry *) bh;
5835 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5836 myh->forced_local = 1;
5837
5838 /* The elf32_arm_init_maps function only cares about symbols from input
5839 BFDs. We must make a note of this generated mapping symbol
5840 ourselves so that code byteswapping works properly in
5841 elf32_arm_write_section. */
5842 elf32_arm_section_map_add (s, 'a', 0);
5843 }
5844
5845 s->size += VFP11_ERRATUM_VENEER_SIZE;
5846 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5847 hash_table->num_vfp11_fixes++;
5848
5849 /* The offset of the veneer. */
5850 return val;
5851 }
5852
5853 #define ARM_GLUE_SECTION_FLAGS \
5854 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5855 | SEC_READONLY | SEC_LINKER_CREATED)
5856
5857 /* Create a fake section for use by the ARM backend of the linker. */
5858
5859 static bfd_boolean
5860 arm_make_glue_section (bfd * abfd, const char * name)
5861 {
5862 asection * sec;
5863
5864 sec = bfd_get_section_by_name (abfd, name);
5865 if (sec != NULL)
5866 /* Already made. */
5867 return TRUE;
5868
5869 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5870
5871 if (sec == NULL
5872 || !bfd_set_section_alignment (abfd, sec, 2))
5873 return FALSE;
5874
5875 /* Set the gc mark to prevent the section from being removed by garbage
5876 collection, despite the fact that no relocs refer to this section. */
5877 sec->gc_mark = 1;
5878
5879 return TRUE;
5880 }
5881
5882 /* Add the glue sections to ABFD. This function is called from the
5883 linker scripts in ld/emultempl/{armelf}.em. */
5884
5885 bfd_boolean
5886 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5887 struct bfd_link_info *info)
5888 {
5889 /* If we are only performing a partial
5890 link do not bother adding the glue. */
5891 if (info->relocatable)
5892 return TRUE;
5893
5894 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5895 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5896 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5897 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5898 }
5899
5900 /* Select a BFD to be used to hold the sections used by the glue code.
5901 This function is called from the linker scripts in ld/emultempl/
5902 {armelf/pe}.em. */
5903
5904 bfd_boolean
5905 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5906 {
5907 struct elf32_arm_link_hash_table *globals;
5908
5909 /* If we are only performing a partial link
5910 do not bother getting a bfd to hold the glue. */
5911 if (info->relocatable)
5912 return TRUE;
5913
5914 /* Make sure we don't attach the glue sections to a dynamic object. */
5915 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5916
5917 globals = elf32_arm_hash_table (info);
5918 BFD_ASSERT (globals != NULL);
5919
5920 if (globals->bfd_of_glue_owner != NULL)
5921 return TRUE;
5922
5923 /* Save the bfd for later use. */
5924 globals->bfd_of_glue_owner = abfd;
5925
5926 return TRUE;
5927 }
5928
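/* Enable use of BLX for interworking when the output's Tag_CPU_arch
   attribute is greater than 2 (ARMv4T), i.e. ARMv5T or later, which
   provides the BLX instruction.  */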
5929 static void
5930 check_use_blx (struct elf32_arm_link_hash_table *globals)
5931 {
5932 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5933 Tag_CPU_arch) > 2)
5934 globals->use_blx = 1;
5935 }
5936
5937 bfd_boolean
5938 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5939 struct bfd_link_info *link_info)
5940 {
5941 Elf_Internal_Shdr *symtab_hdr;
5942 Elf_Internal_Rela *internal_relocs = NULL;
5943 Elf_Internal_Rela *irel, *irelend;
5944 bfd_byte *contents = NULL;
5945
5946 asection *sec;
5947 struct elf32_arm_link_hash_table *globals;
5948
5949 /* If we are only performing a partial link do not bother
5950 to construct any glue. */
5951 if (link_info->relocatable)
5952 return TRUE;
5953
5954   /* Here we have a bfd that is to be included in the link.  We have a
5955 hook to do reloc rummaging, before section sizes are nailed down. */
5956 globals = elf32_arm_hash_table (link_info);
5957 BFD_ASSERT (globals != NULL);
5958
5959 check_use_blx (globals);
5960
5961 if (globals->byteswap_code && !bfd_big_endian (abfd))
5962 {
5963 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5964 abfd);
5965 return FALSE;
5966 }
5967
5968 /* PR 5398: If we have not decided to include any loadable sections in
5969 the output then we will not have a glue owner bfd. This is OK, it
5970 just means that there is nothing else for us to do here. */
5971 if (globals->bfd_of_glue_owner == NULL)
5972 return TRUE;
5973
5974 /* Rummage around all the relocs and map the glue vectors. */
5975 sec = abfd->sections;
5976
5977 if (sec == NULL)
5978 return TRUE;
5979
5980 for (; sec != NULL; sec = sec->next)
5981 {
5982 if (sec->reloc_count == 0)
5983 continue;
5984
5985 if ((sec->flags & SEC_EXCLUDE) != 0)
5986 continue;
5987
5988 symtab_hdr = & elf_symtab_hdr (abfd);
5989
5990 /* Load the relocs. */
5991 internal_relocs
5992 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5993
5994 if (internal_relocs == NULL)
5995 goto error_return;
5996
5997 irelend = internal_relocs + sec->reloc_count;
5998 for (irel = internal_relocs; irel < irelend; irel++)
5999 {
6000 long r_type;
6001 unsigned long r_index;
6002
6003 struct elf_link_hash_entry *h;
6004
6005 r_type = ELF32_R_TYPE (irel->r_info);
6006 r_index = ELF32_R_SYM (irel->r_info);
6007
6008 /* These are the only relocation types we care about. */
6009 if ( r_type != R_ARM_PC24
6010 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6011 continue;
6012
6013 /* Get the section contents if we haven't done so already. */
6014 if (contents == NULL)
6015 {
6016 /* Get cached copy if it exists. */
6017 if (elf_section_data (sec)->this_hdr.contents != NULL)
6018 contents = elf_section_data (sec)->this_hdr.contents;
6019 else
6020 {
6021 /* Go get them off disk. */
6022 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6023 goto error_return;
6024 }
6025 }
6026
6027 if (r_type == R_ARM_V4BX)
6028 {
6029 int reg;
6030
6031 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6032 record_arm_bx_glue (link_info, reg);
6033 continue;
6034 }
6035
6036 /* If the relocation is not against a symbol it cannot concern us. */
6037 h = NULL;
6038
6039 /* We don't care about local symbols. */
6040 if (r_index < symtab_hdr->sh_info)
6041 continue;
6042
6043 /* This is an external symbol. */
6044 r_index -= symtab_hdr->sh_info;
6045 h = (struct elf_link_hash_entry *)
6046 elf_sym_hashes (abfd)[r_index];
6047
6048 /* If the relocation is against a static symbol it must be within
6049 the current section and so cannot be a cross ARM/Thumb relocation. */
6050 if (h == NULL)
6051 continue;
6052
6053 /* If the call will go through a PLT entry then we do not need
6054 glue. */
6055 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6056 continue;
6057
6058 switch (r_type)
6059 {
6060 case R_ARM_PC24:
6061 /* This one is a call from arm code. We need to look up
6062 the target of the call. If it is a thumb target, we
6063 insert glue. */
6064 if (h->target_internal == ST_BRANCH_TO_THUMB)
6065 record_arm_to_thumb_glue (link_info, h);
6066 break;
6067
6068 default:
6069 abort ();
6070 }
6071 }
6072
6073 if (contents != NULL
6074 && elf_section_data (sec)->this_hdr.contents != contents)
6075 free (contents);
6076 contents = NULL;
6077
6078 if (internal_relocs != NULL
6079 && elf_section_data (sec)->relocs != internal_relocs)
6080 free (internal_relocs);
6081 internal_relocs = NULL;
6082 }
6083
6084 return TRUE;
6085
6086 error_return:
6087 if (contents != NULL
6088 && elf_section_data (sec)->this_hdr.contents != contents)
6089 free (contents);
6090 if (internal_relocs != NULL
6091 && elf_section_data (sec)->relocs != internal_relocs)
6092 free (internal_relocs);
6093
6094 return FALSE;
6095 }
6096 #endif
6097
6098
6099 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6100
6101 void
6102 bfd_elf32_arm_init_maps (bfd *abfd)
6103 {
6104 Elf_Internal_Sym *isymbuf;
6105 Elf_Internal_Shdr *hdr;
6106 unsigned int i, localsyms;
6107
6108 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6109 if (! is_arm_elf (abfd))
6110 return;
6111
6112 if ((abfd->flags & DYNAMIC) != 0)
6113 return;
6114
6115 hdr = & elf_symtab_hdr (abfd);
6116 localsyms = hdr->sh_info;
6117
6118 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6119 should contain the number of local symbols, which should come before any
6120 global symbols. Mapping symbols are always local. */
6121 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6122 NULL);
6123
6124 /* No internal symbols read? Skip this BFD. */
6125 if (isymbuf == NULL)
6126 return;
6127
6128 for (i = 0; i < localsyms; i++)
6129 {
6130 Elf_Internal_Sym *isym = &isymbuf[i];
6131 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6132 const char *name;
6133
6134 if (sec != NULL
6135 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6136 {
6137 name = bfd_elf_string_from_elf_section (abfd,
6138 hdr->sh_link, isym->st_name);
6139
6140 if (bfd_is_arm_special_symbol_name (name,
6141 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6142 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6143 }
6144 }
6145 }
6146
6147
6148 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6149 say what they wanted. */
6150
6151 void
6152 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6153 {
6154 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6155 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6156
6157 if (globals == NULL)
6158 return;
6159
6160 if (globals->fix_cortex_a8 == -1)
6161 {
6162 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6163 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6164 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6165 || out_attr[Tag_CPU_arch_profile].i == 0))
6166 globals->fix_cortex_a8 = 1;
6167 else
6168 globals->fix_cortex_a8 = 0;
6169 }
6170 }
6171
6172
6173 void
6174 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6175 {
6176 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6177 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6178
6179 if (globals == NULL)
6180 return;
6181 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6182 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6183 {
6184 switch (globals->vfp11_fix)
6185 {
6186 case BFD_ARM_VFP11_FIX_DEFAULT:
6187 case BFD_ARM_VFP11_FIX_NONE:
6188 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6189 break;
6190
6191 default:
6192 /* Give a warning, but do as the user requests anyway. */
6193 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6194 "workaround is not necessary for target architecture"), obfd);
6195 }
6196 }
6197 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6198 /* For earlier architectures, we might need the workaround, but do not
6199        enable it by default.  If the user is running with broken hardware, they
6200 must enable the erratum fix explicitly. */
6201 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6202 }
6203
6204
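/* Classification of a VFP11 instruction by the pipeline that executes it:
   FMAC (multiply-accumulate), LS (load/store) or DS (divide/square-root).
   VFP11_BAD is returned for anything the scanner does not need to model.  */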
6205 enum bfd_arm_vfp11_pipe
6206 {
6207 VFP11_FMAC,
6208 VFP11_LS,
6209 VFP11_DS,
6210 VFP11_BAD
6211 };
6212
6213 /* Return a VFP register number. This is encoded as RX:X for single-precision
6214 registers, or X:RX for double-precision registers, where RX is the group of
6215 four bits in the instruction encoding and X is the single extension bit.
6216 RX and X fields are specified using their lowest (starting) bit. The return
6217 value is:
6218
6219 0...31: single-precision registers s0...s31
6220 32...63: double-precision registers d0...d31.
6221
6222 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6223 encounter VFP3 instructions, so we allow the full range for DP registers. */
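/* A worked example of the arithmetic below: with IS_DOUBLE false, an RX
   field of 0b0101 and an X bit of 1 give (5 << 1) | 1 == 11, i.e. s11;
   with IS_DOUBLE true the same fields give (5 | (1 << 4)) + 32 == 53,
   i.e. d21.  */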
6224
6225 static unsigned int
6226 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6227 unsigned int x)
6228 {
6229 if (is_double)
6230 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6231 else
6232 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6233 }
6234
6235 /* Set bits in *WMASK according to a register number REG as encoded by
6236 bfd_arm_vfp11_regno(). Ignore d16-d31. */
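/* For example, a DP register such as d5 (REG == 37) sets the two bits
   for its SP halves, s10 and s11.  */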
6237
6238 static void
6239 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6240 {
6241 if (reg < 32)
6242 *wmask |= 1 << reg;
6243 else if (reg < 48)
6244 *wmask |= 3 << ((reg - 32) * 2);
6245 }
6246
6247 /* Return TRUE if WMASK overwrites anything in REGS. */
6248
6249 static bfd_boolean
6250 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6251 {
6252 int i;
6253
6254 for (i = 0; i < numregs; i++)
6255 {
6256 unsigned int reg = regs[i];
6257
6258 if (reg < 32 && (wmask & (1 << reg)) != 0)
6259 return TRUE;
6260
6261 reg -= 32;
6262
6263 if (reg >= 16)
6264 continue;
6265
6266 if ((wmask & (3 << (reg * 2))) != 0)
6267 return TRUE;
6268 }
6269
6270 return FALSE;
6271 }
6272
6273 /* In this function, we're interested in two things: finding input registers
6274 for VFP data-processing instructions, and finding the set of registers which
6275 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6276 hold the written set, so FLDM etc. are easy to deal with (we're only
6277    interested in 32 SP registers or 16 DP registers, due to the VFP version
6278    implemented by the chip in question).  DP registers are marked by setting
6279    both SP registers in the write mask.  */
6280
6281 static enum bfd_arm_vfp11_pipe
6282 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6283 int *numregs)
6284 {
6285 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6286 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6287
6288 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6289 {
6290 unsigned int pqrs;
6291 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6292 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6293
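      /* Gather the data-processing opcode bits -- insn bits 23, 21:20 and 6,
	 conventionally called p, q, r and s -- into one four-bit selector.  */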
6294 pqrs = ((insn & 0x00800000) >> 20)
6295 | ((insn & 0x00300000) >> 19)
6296 | ((insn & 0x00000040) >> 6);
6297
6298 switch (pqrs)
6299 {
6300 case 0: /* fmac[sd]. */
6301 case 1: /* fnmac[sd]. */
6302 case 2: /* fmsc[sd]. */
6303 case 3: /* fnmsc[sd]. */
6304 vpipe = VFP11_FMAC;
6305 bfd_arm_vfp11_write_mask (destmask, fd);
6306 regs[0] = fd;
6307 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6308 regs[2] = fm;
6309 *numregs = 3;
6310 break;
6311
6312 case 4: /* fmul[sd]. */
6313 case 5: /* fnmul[sd]. */
6314 case 6: /* fadd[sd]. */
6315 case 7: /* fsub[sd]. */
6316 vpipe = VFP11_FMAC;
6317 goto vfp_binop;
6318
6319 case 8: /* fdiv[sd]. */
6320 vpipe = VFP11_DS;
6321 vfp_binop:
6322 bfd_arm_vfp11_write_mask (destmask, fd);
6323 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6324 regs[1] = fm;
6325 *numregs = 2;
6326 break;
6327
6328 case 15: /* extended opcode. */
6329 {
6330 unsigned int extn = ((insn >> 15) & 0x1e)
6331 | ((insn >> 7) & 1);
6332
6333 switch (extn)
6334 {
6335 case 0: /* fcpy[sd]. */
6336 case 1: /* fabs[sd]. */
6337 case 2: /* fneg[sd]. */
6338 case 8: /* fcmp[sd]. */
6339 case 9: /* fcmpe[sd]. */
6340 case 10: /* fcmpz[sd]. */
6341 case 11: /* fcmpez[sd]. */
6342 case 16: /* fuito[sd]. */
6343 case 17: /* fsito[sd]. */
6344 case 24: /* ftoui[sd]. */
6345 case 25: /* ftouiz[sd]. */
6346 case 26: /* ftosi[sd]. */
6347 case 27: /* ftosiz[sd]. */
6348 /* These instructions will not bounce due to underflow. */
6349 *numregs = 0;
6350 vpipe = VFP11_FMAC;
6351 break;
6352
6353 case 3: /* fsqrt[sd]. */
6354 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6355 registers to cause the erratum in previous instructions. */
6356 bfd_arm_vfp11_write_mask (destmask, fd);
6357 vpipe = VFP11_DS;
6358 break;
6359
6360 case 15: /* fcvt{ds,sd}. */
6361 {
6362 int rnum = 0;
6363
6364 bfd_arm_vfp11_write_mask (destmask, fd);
6365
6366 /* Only FCVTSD can underflow. */
6367 if ((insn & 0x100) != 0)
6368 regs[rnum++] = fm;
6369
6370 *numregs = rnum;
6371
6372 vpipe = VFP11_FMAC;
6373 }
6374 break;
6375
6376 default:
6377 return VFP11_BAD;
6378 }
6379 }
6380 break;
6381
6382 default:
6383 return VFP11_BAD;
6384 }
6385 }
6386 /* Two-register transfer. */
6387 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6388 {
6389 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6390
6391 if ((insn & 0x100000) == 0)
6392 {
6393 if (is_double)
6394 bfd_arm_vfp11_write_mask (destmask, fm);
6395 else
6396 {
6397 bfd_arm_vfp11_write_mask (destmask, fm);
6398 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6399 }
6400 }
6401
6402 vpipe = VFP11_LS;
6403 }
6404 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6405 {
6406 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6407 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6408
6409 switch (puw)
6410 {
6411 case 0: /* Two-reg transfer. We should catch these above. */
6412 abort ();
6413
6414 case 2: /* fldm[sdx]. */
6415 case 3:
6416 case 5:
6417 {
6418 unsigned int i, offset = insn & 0xff;
6419
6420 if (is_double)
6421 offset >>= 1;
6422
6423 for (i = fd; i < fd + offset; i++)
6424 bfd_arm_vfp11_write_mask (destmask, i);
6425 }
6426 break;
6427
6428 case 4: /* fld[sd]. */
6429 case 6:
6430 bfd_arm_vfp11_write_mask (destmask, fd);
6431 break;
6432
6433 default:
6434 return VFP11_BAD;
6435 }
6436
6437 vpipe = VFP11_LS;
6438 }
6439 /* Single-register transfer. Note L==0. */
6440 else if ((insn & 0x0f100e10) == 0x0e000a10)
6441 {
6442 unsigned int opcode = (insn >> 21) & 7;
6443 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6444
6445 switch (opcode)
6446 {
6447 case 0: /* fmsr/fmdlr. */
6448 case 1: /* fmdhr. */
6449 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6450 destination register. I don't know if this is exactly right,
6451 but it is the conservative choice. */
6452 bfd_arm_vfp11_write_mask (destmask, fn);
6453 break;
6454
6455 case 7: /* fmxr. */
6456 break;
6457 }
6458
6459 vpipe = VFP11_LS;
6460 }
6461
6462 return vpipe;
6463 }
6464
6465
6466 static int elf32_arm_compare_mapping (const void * a, const void * b);
6467
6468
6469 /* Look for potentially-troublesome code sequences which might trigger the
6470 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6471 (available from ARM) for details of the erratum. A short version is
6472 described in ld.texinfo. */
6473
6474 bfd_boolean
6475 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6476 {
6477 asection *sec;
6478 bfd_byte *contents = NULL;
6479 int state = 0;
6480 int regs[3], numregs = 0;
6481   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6482   int use_vector;
6483 
6484   if (globals == NULL)
6485     return FALSE;
6486   use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6487 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6488 The states transition as follows:
6489
6490 0 -> 1 (vector) or 0 -> 2 (scalar)
6491 A VFP FMAC-pipeline instruction has been seen. Fill
6492 regs[0]..regs[numregs-1] with its input operands. Remember this
6493 instruction in 'first_fmac'.
6494
6495 1 -> 2
6496 Any instruction, except for a VFP instruction which overwrites
6497 regs[*].
6498
6499 1 -> 3 [ -> 0 ] or
6500 2 -> 3 [ -> 0 ]
6501 A VFP instruction has been seen which overwrites any of regs[*].
6502 We must make a veneer! Reset state to 0 before examining next
6503 instruction.
6504
6505 2 -> 0
6506 If we fail to match anything in state 2, reset to state 0 and reset
6507 the instruction pointer to the instruction after 'first_fmac'.
6508
6509 If the VFP11 vector mode is in use, there must be at least two unrelated
6510 instructions between anti-dependent VFP11 instructions to properly avoid
6511 triggering the erratum, hence the use of the extra state 1. */
6512
6513 /* If we are only performing a partial link do not bother
6514 to construct any glue. */
6515 if (link_info->relocatable)
6516 return TRUE;
6517
6518 /* Skip if this bfd does not correspond to an ELF image. */
6519 if (! is_arm_elf (abfd))
6520 return TRUE;
6521
6522 /* We should have chosen a fix type by the time we get here. */
6523 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6524
6525 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6526 return TRUE;
6527
6528 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6529 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6530 return TRUE;
6531
6532 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6533 {
6534 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6535 struct _arm_elf_section_data *sec_data;
6536
6537 /* If we don't have executable progbits, we're not interested in this
6538 section. Also skip if section is to be excluded. */
6539 if (elf_section_type (sec) != SHT_PROGBITS
6540 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6541 || (sec->flags & SEC_EXCLUDE) != 0
6542 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6543 || sec->output_section == bfd_abs_section_ptr
6544 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6545 continue;
6546
6547 sec_data = elf32_arm_section_data (sec);
6548
6549 if (sec_data->mapcount == 0)
6550 continue;
6551
6552 if (elf_section_data (sec)->this_hdr.contents != NULL)
6553 contents = elf_section_data (sec)->this_hdr.contents;
6554 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6555 goto error_return;
6556
6557 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6558 elf32_arm_compare_mapping);
6559
6560 for (span = 0; span < sec_data->mapcount; span++)
6561 {
6562 unsigned int span_start = sec_data->map[span].vma;
6563 unsigned int span_end = (span == sec_data->mapcount - 1)
6564 ? sec->size : sec_data->map[span + 1].vma;
6565 char span_type = sec_data->map[span].type;
6566
6567 /* FIXME: Only ARM mode is supported at present. We may need to
6568 support Thumb-2 mode also at some point. */
6569 if (span_type != 'a')
6570 continue;
6571
6572 for (i = span_start; i < span_end;)
6573 {
6574 unsigned int next_i = i + 4;
6575 unsigned int insn = bfd_big_endian (abfd)
6576 ? (contents[i] << 24)
6577 | (contents[i + 1] << 16)
6578 | (contents[i + 2] << 8)
6579 | contents[i + 3]
6580 : (contents[i + 3] << 24)
6581 | (contents[i + 2] << 16)
6582 | (contents[i + 1] << 8)
6583 | contents[i];
6584 unsigned int writemask = 0;
6585 enum bfd_arm_vfp11_pipe vpipe;
6586
6587 switch (state)
6588 {
6589 case 0:
6590 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6591 &numregs);
6592 /* I'm assuming the VFP11 erratum can trigger with denorm
6593 operands on either the FMAC or the DS pipeline. This might
6594 lead to slightly overenthusiastic veneer insertion. */
6595 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6596 {
6597 state = use_vector ? 1 : 2;
6598 first_fmac = i;
6599 veneer_of_insn = insn;
6600 }
6601 break;
6602
6603 case 1:
6604 {
6605 int other_regs[3], other_numregs;
6606 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6607 other_regs,
6608 &other_numregs);
6609 if (vpipe != VFP11_BAD
6610 && bfd_arm_vfp11_antidependency (writemask, regs,
6611 numregs))
6612 state = 3;
6613 else
6614 state = 2;
6615 }
6616 break;
6617
6618 case 2:
6619 {
6620 int other_regs[3], other_numregs;
6621 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6622 other_regs,
6623 &other_numregs);
6624 if (vpipe != VFP11_BAD
6625 && bfd_arm_vfp11_antidependency (writemask, regs,
6626 numregs))
6627 state = 3;
6628 else
6629 {
6630 state = 0;
6631 next_i = first_fmac + 4;
6632 }
6633 }
6634 break;
6635
6636 case 3:
6637 abort (); /* Should be unreachable. */
6638 }
6639
6640 if (state == 3)
6641 {
6642                  elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6643 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6644
6645 elf32_arm_section_data (sec)->erratumcount += 1;
6646
6647 newerr->u.b.vfp_insn = veneer_of_insn;
6648
6649 switch (span_type)
6650 {
6651 case 'a':
6652 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6653 break;
6654
6655 default:
6656 abort ();
6657 }
6658
6659 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6660 first_fmac);
6661
6662 newerr->vma = -1;
6663
6664 newerr->next = sec_data->erratumlist;
6665 sec_data->erratumlist = newerr;
6666
6667 state = 0;
6668 }
6669
6670 i = next_i;
6671 }
6672 }
6673
6674 if (contents != NULL
6675 && elf_section_data (sec)->this_hdr.contents != contents)
6676 free (contents);
6677 contents = NULL;
6678 }
6679
6680 return TRUE;
6681
6682 error_return:
6683 if (contents != NULL
6684 && elf_section_data (sec)->this_hdr.contents != contents)
6685 free (contents);
6686
6687 return FALSE;
6688 }
6689
6690 /* Find virtual-memory addresses for VFP11 erratum veneers and their return
6691    locations after sections have been laid out, using specially-named symbols.  */
6692
6693 void
6694 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6695 struct bfd_link_info *link_info)
6696 {
6697 asection *sec;
6698 struct elf32_arm_link_hash_table *globals;
6699 char *tmp_name;
6700
6701 if (link_info->relocatable)
6702 return;
6703
6704 /* Skip if this bfd does not correspond to an ELF image. */
6705 if (! is_arm_elf (abfd))
6706 return;
6707
6708 globals = elf32_arm_hash_table (link_info);
6709 if (globals == NULL)
6710 return;
6711
6712 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6713 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6714
6715 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6716 {
6717 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6718 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6719
6720 for (; errnode != NULL; errnode = errnode->next)
6721 {
6722 struct elf_link_hash_entry *myh;
6723 bfd_vma vma;
6724
6725 switch (errnode->type)
6726 {
6727 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6728 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6729 /* Find veneer symbol. */
6730 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6731 errnode->u.b.veneer->u.v.id);
6732
6733 myh = elf_link_hash_lookup
6734 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6735
6736 if (myh == NULL)
6737 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6738 "`%s'"), abfd, tmp_name);
6739
6740 vma = myh->root.u.def.section->output_section->vma
6741 + myh->root.u.def.section->output_offset
6742 + myh->root.u.def.value;
6743
6744 errnode->u.b.veneer->vma = vma;
6745 break;
6746
6747 case VFP11_ERRATUM_ARM_VENEER:
6748 case VFP11_ERRATUM_THUMB_VENEER:
6749 /* Find return location. */
6750 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6751 errnode->u.v.id);
6752
6753 myh = elf_link_hash_lookup
6754 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6755
6756 if (myh == NULL)
6757 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6758 "`%s'"), abfd, tmp_name);
6759
6760 vma = myh->root.u.def.section->output_section->vma
6761 + myh->root.u.def.section->output_offset
6762 + myh->root.u.def.value;
6763
6764 errnode->u.v.branch->vma = vma;
6765 break;
6766
6767 default:
6768 abort ();
6769 }
6770 }
6771 }
6772
6773 free (tmp_name);
6774 }
6775
6776
6777 /* Set target relocation values needed during linking. */
6778
6779 void
6780 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6781 struct bfd_link_info *link_info,
6782 int target1_is_rel,
6783 char * target2_type,
6784 int fix_v4bx,
6785 int use_blx,
6786 bfd_arm_vfp11_fix vfp11_fix,
6787 int no_enum_warn, int no_wchar_warn,
6788 int pic_veneer, int fix_cortex_a8)
6789 {
6790 struct elf32_arm_link_hash_table *globals;
6791
6792 globals = elf32_arm_hash_table (link_info);
6793 if (globals == NULL)
6794 return;
6795
6796 globals->target1_is_rel = target1_is_rel;
6797 if (strcmp (target2_type, "rel") == 0)
6798 globals->target2_reloc = R_ARM_REL32;
6799 else if (strcmp (target2_type, "abs") == 0)
6800 globals->target2_reloc = R_ARM_ABS32;
6801 else if (strcmp (target2_type, "got-rel") == 0)
6802 globals->target2_reloc = R_ARM_GOT_PREL;
6803 else
6804 {
6805 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6806 target2_type);
6807 }
6808 globals->fix_v4bx = fix_v4bx;
6809 globals->use_blx |= use_blx;
6810 globals->vfp11_fix = vfp11_fix;
6811 globals->pic_veneer = pic_veneer;
6812 globals->fix_cortex_a8 = fix_cortex_a8;
6813
6814 BFD_ASSERT (is_arm_elf (output_bfd));
6815 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6816 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6817 }
6818
6819 /* Replace the target offset of a Thumb bl or b.w instruction. */
6820
6821 static void
6822 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6823 {
6824 bfd_vma upper;
6825 bfd_vma lower;
6826 int reloc_sign;
6827
6828 BFD_ASSERT ((offset & 1) == 0);
6829
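  /* The Thumb-2 BL/B.W offset is split across two halfwords: the upper one
     holds the sign bit S and imm10; the lower one holds imm11 plus J1 and J2,
     which are stored as NOT(I1) XOR S and NOT(I2) XOR S, where I1 and I2 are
     offset bits 23 and 22.  */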
6830 upper = bfd_get_16 (abfd, insn);
6831 lower = bfd_get_16 (abfd, insn + 2);
6832 reloc_sign = (offset < 0) ? 1 : 0;
6833 upper = (upper & ~(bfd_vma) 0x7ff)
6834 | ((offset >> 12) & 0x3ff)
6835 | (reloc_sign << 10);
6836 lower = (lower & ~(bfd_vma) 0x2fff)
6837 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6838 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6839 | ((offset >> 1) & 0x7ff);
6840 bfd_put_16 (abfd, upper, insn);
6841 bfd_put_16 (abfd, lower, insn + 2);
6842 }
6843
6844 /* Thumb code calling an ARM function. */
6845
6846 static int
6847 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6848 const char * name,
6849 bfd * input_bfd,
6850 bfd * output_bfd,
6851 asection * input_section,
6852 bfd_byte * hit_data,
6853 asection * sym_sec,
6854 bfd_vma offset,
6855 bfd_signed_vma addend,
6856 bfd_vma val,
6857 char **error_message)
6858 {
6859 asection * s = 0;
6860 bfd_vma my_offset;
6861 long int ret_offset;
6862 struct elf_link_hash_entry * myh;
6863 struct elf32_arm_link_hash_table * globals;
6864
6865 myh = find_thumb_glue (info, name, error_message);
6866 if (myh == NULL)
6867 return FALSE;
6868
6869 globals = elf32_arm_hash_table (info);
6870 BFD_ASSERT (globals != NULL);
6871 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6872
6873 my_offset = myh->root.u.def.value;
6874
6875 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6876 THUMB2ARM_GLUE_SECTION_NAME);
6877
6878 BFD_ASSERT (s != NULL);
6879 BFD_ASSERT (s->contents != NULL);
6880 BFD_ASSERT (s->output_section != NULL);
6881
6882 if ((my_offset & 0x01) == 0x01)
6883 {
6884 if (sym_sec != NULL
6885 && sym_sec->owner != NULL
6886 && !INTERWORK_FLAG (sym_sec->owner))
6887 {
6888 (*_bfd_error_handler)
6889 (_("%B(%s): warning: interworking not enabled.\n"
6890 " first occurrence: %B: thumb call to arm"),
6891 sym_sec->owner, input_bfd, name);
6892
6893 return FALSE;
6894 }
6895
6896 --my_offset;
6897 myh->root.u.def.value = my_offset;
6898
6899 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6900 s->contents + my_offset);
6901
6902 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6903 s->contents + my_offset + 2);
6904
6905 ret_offset =
6906 /* Address of destination of the stub. */
6907 ((bfd_signed_vma) val)
6908 - ((bfd_signed_vma)
6909 /* Offset from the start of the current section
6910 to the start of the stubs. */
6911 (s->output_offset
6912 /* Offset of the start of this stub from the start of the stubs. */
6913 + my_offset
6914 /* Address of the start of the current section. */
6915 + s->output_section->vma)
6916 /* The branch instruction is 4 bytes into the stub. */
6917 + 4
6918 /* ARM branches work from the pc of the instruction + 8. */
6919 + 8);
6920
6921 put_arm_insn (globals, output_bfd,
6922 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6923 s->contents + my_offset + 4);
6924 }
6925
6926 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6927
6928 /* Now go back and fix up the original BL insn to point to here. */
6929 ret_offset =
6930 /* Address of where the stub is located. */
6931 (s->output_section->vma + s->output_offset + my_offset)
6932 /* Address of where the BL is located. */
6933 - (input_section->output_section->vma + input_section->output_offset
6934 + offset)
6935 /* Addend in the relocation. */
6936 - addend
6937 /* Biassing for PC-relative addressing. */
6938 - 8;
6939
6940 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6941
6942 return TRUE;
6943 }
6944
6945 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6946
6947 static struct elf_link_hash_entry *
6948 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6949 const char * name,
6950 bfd * input_bfd,
6951 bfd * output_bfd,
6952 asection * sym_sec,
6953 bfd_vma val,
6954 asection * s,
6955 char ** error_message)
6956 {
6957 bfd_vma my_offset;
6958 long int ret_offset;
6959 struct elf_link_hash_entry * myh;
6960 struct elf32_arm_link_hash_table * globals;
6961
6962 myh = find_arm_glue (info, name, error_message);
6963 if (myh == NULL)
6964 return NULL;
6965
6966 globals = elf32_arm_hash_table (info);
6967 BFD_ASSERT (globals != NULL);
6968 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6969
6970 my_offset = myh->root.u.def.value;
6971
6972 if ((my_offset & 0x01) == 0x01)
6973 {
6974 if (sym_sec != NULL
6975 && sym_sec->owner != NULL
6976 && !INTERWORK_FLAG (sym_sec->owner))
6977 {
6978 (*_bfd_error_handler)
6979 (_("%B(%s): warning: interworking not enabled.\n"
6980 " first occurrence: %B: arm call to thumb"),
6981 sym_sec->owner, input_bfd, name);
6982 }
6983
6984 --my_offset;
6985 myh->root.u.def.value = my_offset;
6986
6987 if (info->shared || globals->root.is_relocatable_executable
6988 || globals->pic_veneer)
6989 {
6990 /* For relocatable objects we can't use absolute addresses,
6991 so construct the address from a relative offset. */
6992 /* TODO: If the offset is small it's probably worth
6993 constructing the address with adds. */
6994 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6995 s->contents + my_offset);
6996 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6997 s->contents + my_offset + 4);
6998 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6999 s->contents + my_offset + 8);
7000 /* Adjust the offset by 4 for the position of the add,
7001 and 8 for the pipeline offset. */
7002 ret_offset = (val - (s->output_offset
7003 + s->output_section->vma
7004 + my_offset + 12))
7005 | 1;
7006 bfd_put_32 (output_bfd, ret_offset,
7007 s->contents + my_offset + 12);
7008 }
7009 else if (globals->use_blx)
7010 {
7011 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7012 s->contents + my_offset);
7013
7014 /* It's a thumb address. Add the low order bit. */
7015 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7016 s->contents + my_offset + 4);
7017 }
7018 else
7019 {
7020 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7021 s->contents + my_offset);
7022
7023 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7024 s->contents + my_offset + 4);
7025
7026 /* It's a thumb address. Add the low order bit. */
7027 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7028 s->contents + my_offset + 8);
7029
7030 my_offset += 12;
7031 }
7032 }
7033
7034 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7035
7036 return myh;
7037 }
7038
7039 /* Arm code calling a Thumb function. */
7040
7041 static int
7042 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7043 const char * name,
7044 bfd * input_bfd,
7045 bfd * output_bfd,
7046 asection * input_section,
7047 bfd_byte * hit_data,
7048 asection * sym_sec,
7049 bfd_vma offset,
7050 bfd_signed_vma addend,
7051 bfd_vma val,
7052 char **error_message)
7053 {
7054 unsigned long int tmp;
7055 bfd_vma my_offset;
7056 asection * s;
7057 long int ret_offset;
7058 struct elf_link_hash_entry * myh;
7059 struct elf32_arm_link_hash_table * globals;
7060
7061 globals = elf32_arm_hash_table (info);
7062 BFD_ASSERT (globals != NULL);
7063 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7064
7065 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7066 ARM2THUMB_GLUE_SECTION_NAME);
7067 BFD_ASSERT (s != NULL);
7068 BFD_ASSERT (s->contents != NULL);
7069 BFD_ASSERT (s->output_section != NULL);
7070
7071 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7072 sym_sec, val, s, error_message);
7073 if (!myh)
7074 return FALSE;
7075
7076 my_offset = myh->root.u.def.value;
7077 tmp = bfd_get_32 (input_bfd, hit_data);
7078 tmp = tmp & 0xFF000000;
7079
7080 /* Somehow these are both 4 too far, so subtract 8. */
7081 ret_offset = (s->output_offset
7082 + my_offset
7083 + s->output_section->vma
7084 - (input_section->output_offset
7085 + input_section->output_section->vma
7086 + offset + addend)
7087 - 8);
7088
7089 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7090
7091 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7092
7093 return TRUE;
7094 }
7095
7096 /* Populate Arm stub for an exported Thumb function. */
7097
7098 static bfd_boolean
7099 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7100 {
7101 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7102 asection * s;
7103 struct elf_link_hash_entry * myh;
7104 struct elf32_arm_link_hash_entry *eh;
7105 struct elf32_arm_link_hash_table * globals;
7106 asection *sec;
7107 bfd_vma val;
7108 char *error_message;
7109
7110 eh = elf32_arm_hash_entry (h);
7111 /* Allocate stubs for exported Thumb functions on v4t. */
7112 if (eh->export_glue == NULL)
7113 return TRUE;
7114
7115 globals = elf32_arm_hash_table (info);
7116 BFD_ASSERT (globals != NULL);
7117 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7118
7119 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7120 ARM2THUMB_GLUE_SECTION_NAME);
7121 BFD_ASSERT (s != NULL);
7122 BFD_ASSERT (s->contents != NULL);
7123 BFD_ASSERT (s->output_section != NULL);
7124
7125 sec = eh->export_glue->root.u.def.section;
7126
7127 BFD_ASSERT (sec->output_section != NULL);
7128
7129 val = eh->export_glue->root.u.def.value + sec->output_offset
7130 + sec->output_section->vma;
7131
7132 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7133 h->root.u.def.section->owner,
7134 globals->obfd, sec, val, s,
7135 &error_message);
7136 BFD_ASSERT (myh);
7137 return TRUE;
7138 }
7139
7140 /* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
7141
7142 static bfd_vma
7143 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7144 {
7145 bfd_byte *p;
7146 bfd_vma glue_addr;
7147 asection *s;
7148 struct elf32_arm_link_hash_table *globals;
7149
7150 globals = elf32_arm_hash_table (info);
7151 BFD_ASSERT (globals != NULL);
7152 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7153
7154 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
7155 ARM_BX_GLUE_SECTION_NAME);
7156 BFD_ASSERT (s != NULL);
7157 BFD_ASSERT (s->contents != NULL);
7158 BFD_ASSERT (s->output_section != NULL);
7159
7160 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7161
7162 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7163
7164 if ((globals->bx_glue_offset[reg] & 1) == 0)
7165 {
7166 p = s->contents + glue_addr;
7167 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7168 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7169 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7170 globals->bx_glue_offset[reg] |= 1;
7171 }
7172
7173 return glue_addr + s->output_section->vma + s->output_offset;
7174 }
7175
7176 /* Generate Arm stubs for exported Thumb symbols. */
7177 static void
7178 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7179 struct bfd_link_info *link_info)
7180 {
7181 struct elf32_arm_link_hash_table * globals;
7182
7183 if (link_info == NULL)
7184 /* Ignore this if we are not called by the ELF backend linker. */
7185 return;
7186
7187 globals = elf32_arm_hash_table (link_info);
7188 if (globals == NULL)
7189 return;
7190
7191 /* If blx is available then exported Thumb symbols are OK and there is
7192 nothing to do. */
7193 if (globals->use_blx)
7194 return;
7195
7196 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7197 link_info);
7198 }
7199
7200 /* Reserve space for COUNT dynamic relocations in relocation section
7201 SRELOC. */
7202
7203 static void
7204 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7205 bfd_size_type count)
7206 {
7207 struct elf32_arm_link_hash_table *htab;
7208
7209 htab = elf32_arm_hash_table (info);
7210 BFD_ASSERT (htab->root.dynamic_sections_created);
7211 if (sreloc == NULL)
7212 abort ();
7213 sreloc->size += RELOC_SIZE (htab) * count;
7214 }
7215
7216 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7217 dynamic, the relocations should go in SRELOC, otherwise they should
7218 go in the special .rel.iplt section. */
7219
7220 static void
7221 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7222 bfd_size_type count)
7223 {
7224 struct elf32_arm_link_hash_table *htab;
7225
7226 htab = elf32_arm_hash_table (info);
7227 if (!htab->root.dynamic_sections_created)
7228 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7229 else
7230 {
7231 BFD_ASSERT (sreloc != NULL);
7232 sreloc->size += RELOC_SIZE (htab) * count;
7233 }
7234 }
7235
7236 /* Add relocation REL to the end of relocation section SRELOC. */
7237
7238 static void
7239 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7240 asection *sreloc, Elf_Internal_Rela *rel)
7241 {
7242 bfd_byte *loc;
7243 struct elf32_arm_link_hash_table *htab;
7244
7245 htab = elf32_arm_hash_table (info);
7246 if (!htab->root.dynamic_sections_created
7247 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7248 sreloc = htab->root.irelplt;
7249 if (sreloc == NULL)
7250 abort ();
7251 loc = sreloc->contents;
7252 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7253 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7254 abort ();
7255 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7256 }
7257
7258 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7259 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7260 to .plt. */
7261
7262 static void
7263 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7264 bfd_boolean is_iplt_entry,
7265 union gotplt_union *root_plt,
7266 struct arm_plt_info *arm_plt)
7267 {
7268 struct elf32_arm_link_hash_table *htab;
7269 asection *splt;
7270 asection *sgotplt;
7271
7272 htab = elf32_arm_hash_table (info);
7273
7274 if (is_iplt_entry)
7275 {
7276 splt = htab->root.iplt;
7277 sgotplt = htab->root.igotplt;
7278
7279 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7280 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7281 }
7282 else
7283 {
7284 splt = htab->root.splt;
7285 sgotplt = htab->root.sgotplt;
7286
7287 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
7288 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7289
7290 /* If this is the first .plt entry, make room for the special
7291 first entry. */
7292 if (splt->size == 0)
7293 splt->size += htab->plt_header_size;
7294 }
7295
7296 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7297 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7298 splt->size += PLT_THUMB_STUB_SIZE;
7299 root_plt->offset = splt->size;
7300 splt->size += htab->plt_entry_size;
7301
7302 if (!htab->symbian_p)
7303 {
7304 /* We also need to make an entry in the .got.plt section, which
7305 will be placed in the .got section by the linker script. */
7306 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7307 sgotplt->size += 4;
7308 }
7309 }
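/* Sizing sketch (illustrative, assuming the common non-VxWorks layout of a
20-byte .plt header and 12-byte entries): the first symbol handled here
ends up with root_plt->offset == 20, the next with 32, and so on. A symbol
that needs the Thumb stub has PLT_THUMB_STUB_SIZE extra bytes reserved just
before its entry, so its offset points at the ARM entry proper rather than
at the stub. Each non-Symbian entry also claims 4 bytes of .got.plt. */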
7310
7311 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7312 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7313 Otherwise, DYNINDX is the index of the symbol in the dynamic
7314 symbol table and SYM_VALUE is undefined.
7315
7316 ROOT_PLT points to the offset of the PLT entry from the start of its
7317 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7318 bookkeeping information. */
7319
7320 static void
7321 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7322 union gotplt_union *root_plt,
7323 struct arm_plt_info *arm_plt,
7324 int dynindx, bfd_vma sym_value)
7325 {
7326 struct elf32_arm_link_hash_table *htab;
7327 asection *sgot;
7328 asection *splt;
7329 asection *srel;
7330 bfd_byte *loc;
7331 bfd_vma plt_index;
7332 Elf_Internal_Rela rel;
7333 bfd_vma plt_header_size;
7334 bfd_vma got_header_size;
7335
7336 htab = elf32_arm_hash_table (info);
7337
7338 /* Pick the appropriate sections and sizes. */
7339 if (dynindx == -1)
7340 {
7341 splt = htab->root.iplt;
7342 sgot = htab->root.igotplt;
7343 srel = htab->root.irelplt;
7344
7345 /* There are no reserved entries in .igot.plt, and no special
7346 first entry in .iplt. */
7347 got_header_size = 0;
7348 plt_header_size = 0;
7349 }
7350 else
7351 {
7352 splt = htab->root.splt;
7353 sgot = htab->root.sgotplt;
7354 srel = htab->root.srelplt;
7355
7356 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7357 plt_header_size = htab->plt_header_size;
7358 }
7359 BFD_ASSERT (splt != NULL && srel != NULL);
7360
7361 /* Fill in the entry in the procedure linkage table. */
7362 if (htab->symbian_p)
7363 {
7364 BFD_ASSERT (dynindx >= 0);
7365 put_arm_insn (htab, output_bfd,
7366 elf32_arm_symbian_plt_entry[0],
7367 splt->contents + root_plt->offset);
7368 bfd_put_32 (output_bfd,
7369 elf32_arm_symbian_plt_entry[1],
7370 splt->contents + root_plt->offset + 4);
7371
7372 /* Fill in the entry in the .rel.plt section. */
7373 rel.r_offset = (splt->output_section->vma
7374 + splt->output_offset
7375 + root_plt->offset + 4);
7376 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7377
7378 /* Get the index in the procedure linkage table which
7379 corresponds to this symbol. This is the index of this symbol
7380 in all the symbols for which we are making plt entries. The
7381 first entry in the procedure linkage table is reserved. */
7382 plt_index = ((root_plt->offset - plt_header_size)
7383 / htab->plt_entry_size);
7384 }
7385 else
7386 {
7387 bfd_vma got_offset, got_address, plt_address;
7388 bfd_vma got_displacement, initial_got_entry;
7389 bfd_byte * ptr;
7390
7391 BFD_ASSERT (sgot != NULL);
7392
7393 /* Get the offset into the .(i)got.plt table of the entry that
7394 corresponds to this function. */
7395 got_offset = (arm_plt->got_offset & -2);
7396
7397 /* Get the index in the procedure linkage table which
7398 corresponds to this symbol. This is the index of this symbol
7399 in all the symbols for which we are making plt entries.
7400 After the reserved .got.plt entries, all symbols appear in
7401 the same order as in .plt. */
7402 plt_index = (got_offset - got_header_size) / 4;
7403
7404 /* Calculate the address of the GOT entry. */
7405 got_address = (sgot->output_section->vma
7406 + sgot->output_offset
7407 + got_offset);
7408
7409 /* ...and the address of the PLT entry. */
7410 plt_address = (splt->output_section->vma
7411 + splt->output_offset
7412 + root_plt->offset);
7413
7414 ptr = splt->contents + root_plt->offset;
7415 if (htab->vxworks_p && info->shared)
7416 {
7417 unsigned int i;
7418 bfd_vma val;
7419
7420 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7421 {
7422 val = elf32_arm_vxworks_shared_plt_entry[i];
7423 if (i == 2)
7424 val |= got_address - sgot->output_section->vma;
7425 if (i == 5)
7426 val |= plt_index * RELOC_SIZE (htab);
7427 if (i == 2 || i == 5)
7428 bfd_put_32 (output_bfd, val, ptr);
7429 else
7430 put_arm_insn (htab, output_bfd, val, ptr);
7431 }
7432 }
7433 else if (htab->vxworks_p)
7434 {
7435 unsigned int i;
7436 bfd_vma val;
7437
7438 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7439 {
7440 val = elf32_arm_vxworks_exec_plt_entry[i];
7441 if (i == 2)
7442 val |= got_address;
7443 if (i == 4)
7444 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7445 if (i == 5)
7446 val |= plt_index * RELOC_SIZE (htab);
7447 if (i == 2 || i == 5)
7448 bfd_put_32 (output_bfd, val, ptr);
7449 else
7450 put_arm_insn (htab, output_bfd, val, ptr);
7451 }
7452
7453 loc = (htab->srelplt2->contents
7454 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7455
7456 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7457 referencing the GOT for this PLT entry. */
7458 rel.r_offset = plt_address + 8;
7459 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7460 rel.r_addend = got_offset;
7461 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7462 loc += RELOC_SIZE (htab);
7463
7464 /* Create the R_ARM_ABS32 relocation referencing the
7465 beginning of the PLT for this GOT entry. */
7466 rel.r_offset = got_address;
7467 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7468 rel.r_addend = 0;
7469 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7470 }
7471 else
7472 {
7473 /* Calculate the displacement between the PLT slot and the
7474 entry in the GOT. The eight-byte offset accounts for the
7475 value produced by adding to pc in the first instruction
7476 of the PLT stub. */
7477 got_displacement = got_address - (plt_address + 8);
7478
7479 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7480
7481 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7482 {
7483 put_thumb_insn (htab, output_bfd,
7484 elf32_arm_plt_thumb_stub[0], ptr - 4);
7485 put_thumb_insn (htab, output_bfd,
7486 elf32_arm_plt_thumb_stub[1], ptr - 2);
7487 }
7488
7489 put_arm_insn (htab, output_bfd,
7490 elf32_arm_plt_entry[0]
7491 | ((got_displacement & 0x0ff00000) >> 20),
7492 ptr + 0);
7493 put_arm_insn (htab, output_bfd,
7494 elf32_arm_plt_entry[1]
7495 | ((got_displacement & 0x000ff000) >> 12),
7496 ptr + 4);
7497 put_arm_insn (htab, output_bfd,
7498 elf32_arm_plt_entry[2]
7499 | (got_displacement & 0x00000fff),
7500 ptr + 8);
7501 #ifdef FOUR_WORD_PLT
7502 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
7503 #endif
7504 }
7505
7506 /* Fill in the entry in the .rel(a).(i)plt section. */
7507 rel.r_offset = got_address;
7508 rel.r_addend = 0;
7509 if (dynindx == -1)
7510 {
7511 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7512 The dynamic linker or static executable then calls SYM_VALUE
7513 to determine the correct run-time value of the .igot.plt entry. */
7514 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7515 initial_got_entry = sym_value;
7516 }
7517 else
7518 {
7519 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7520 initial_got_entry = (splt->output_section->vma
7521 + splt->output_offset);
7522 }
7523
7524 /* Fill in the entry in the global offset table. */
7525 bfd_put_32 (output_bfd, initial_got_entry,
7526 sgot->contents + got_offset);
7527 }
7528
7529 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7530 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7531 }
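/* Worked example for the generic (non-VxWorks, non-Symbian) entry above,
with illustrative values: for got_displacement == 0x08765432 the three
instruction immediates become (0x08765432 & 0x0ff00000) >> 20 == 0x87,
(0x08765432 & 0x000ff000) >> 12 == 0x65 and 0x08765432 & 0x00000fff
== 0x432, i.e. the displacement is split into chunks that fit the
immediate fields of the usual add/add/ldr sequence; the top nibble must
be zero, which is what the BFD_ASSERT on 0xf0000000 checks. */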
7532
7533 /* Some relocations map to different relocations depending on the
7534 target. Return the real relocation. */
7535
7536 static int
7537 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7538 int r_type)
7539 {
7540 switch (r_type)
7541 {
7542 case R_ARM_TARGET1:
7543 if (globals->target1_is_rel)
7544 return R_ARM_REL32;
7545 else
7546 return R_ARM_ABS32;
7547
7548 case R_ARM_TARGET2:
7549 return globals->target2_reloc;
7550
7551 default:
7552 return r_type;
7553 }
7554 }
7555
7556 /* Return the base VMA address which should be subtracted from real addresses
7557 when resolving @dtpoff relocation.
7558 This is PT_TLS segment p_vaddr. */
7559
7560 static bfd_vma
7561 dtpoff_base (struct bfd_link_info *info)
7562 {
7563 /* If tls_sec is NULL, we should have signalled an error already. */
7564 if (elf_hash_table (info)->tls_sec == NULL)
7565 return 0;
7566 return elf_hash_table (info)->tls_sec->vma;
7567 }
7568
7569 /* Return the value of an @tpoff relocation for a thread-local (STT_TLS)
7570 symbol whose virtual address is ADDRESS. */
7571
7572 static bfd_vma
7573 tpoff (struct bfd_link_info *info, bfd_vma address)
7574 {
7575 struct elf_link_hash_table *htab = elf_hash_table (info);
7576 bfd_vma base;
7577
7578 /* If tls_sec is NULL, we should have signalled an error already. */
7579 if (htab->tls_sec == NULL)
7580 return 0;
7581 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7582 return address - htab->tls_sec->vma + base;
7583 }
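/* Worked example (illustrative, assuming TCB_SIZE is 8): with a PT_TLS
segment aligned to 4 bytes (alignment_power == 2), base is
align_power (8, 2) == 8, so a thread-local symbol at tls_sec->vma + 0x10
yields a @tpoff value of 0x18. */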
7584
7585 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7586 VALUE is the relocation value. */
7587
7588 static bfd_reloc_status_type
7589 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7590 {
7591 if (value > 0xfff)
7592 return bfd_reloc_overflow;
7593
7594 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7595 bfd_put_32 (abfd, value, data);
7596 return bfd_reloc_ok;
7597 }
7598
7599 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7600 R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7601 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7602
7603 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7604 is to then call final_link_relocate. Return other values in the
7605 case of error.
7606
7607 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7608 the pre-relaxed code. It would be nice if the relocs were updated
7609 to match the optimization. */
7610
7611 static bfd_reloc_status_type
7612 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7613 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7614 Elf_Internal_Rela *rel, unsigned long is_local)
7615 {
7616 unsigned long insn;
7617
7618 switch (ELF32_R_TYPE (rel->r_info))
7619 {
7620 default:
7621 return bfd_reloc_notsupported;
7622
7623 case R_ARM_TLS_GOTDESC:
7624 if (is_local)
7625 insn = 0;
7626 else
7627 {
7628 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7629 if (insn & 1)
7630 insn -= 5; /* THUMB */
7631 else
7632 insn -= 8; /* ARM */
7633 }
7634 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7635 return bfd_reloc_continue;
7636
7637 case R_ARM_THM_TLS_DESCSEQ:
7638 /* Thumb insn. */
7639 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7640 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
7641 {
7642 if (is_local)
7643 /* nop */
7644 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7645 }
7646 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
7647 {
7648 if (is_local)
7649 /* nop */
7650 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7651 else
7652 /* ldr rx,[ry] */
7653 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
7654 }
7655 else if ((insn & 0xff87) == 0x4780) /* blx rx */
7656 {
7657 if (is_local)
7658 /* nop */
7659 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7660 else
7661 /* mov r0, rx */
7662 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
7663 contents + rel->r_offset);
7664 }
7665 else
7666 {
7667 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
7668 /* It's a 32 bit instruction, fetch the rest of it for
7669 error generation. */
7670 insn = (insn << 16)
7671 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
7672 (*_bfd_error_handler)
7673 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
7674 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7675 return bfd_reloc_notsupported;
7676 }
7677 break;
7678
7679 case R_ARM_TLS_DESCSEQ:
7680 /* arm insn. */
7681 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7682 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
7683 {
7684 if (is_local)
7685 /* mov rx, ry */
7686 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
7687 contents + rel->r_offset);
7688 }
7689 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
7690 {
7691 if (is_local)
7692 /* nop */
7693 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7694 else
7695 /* ldr rx,[ry] */
7696 bfd_put_32 (input_bfd, insn & 0xfffff000,
7697 contents + rel->r_offset);
7698 }
7699 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
7700 {
7701 if (is_local)
7702 /* nop */
7703 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7704 else
7705 /* mov r0, rx */
7706 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
7707 contents + rel->r_offset);
7708 }
7709 else
7710 {
7711 (*_bfd_error_handler)
7712 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
7713 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7714 return bfd_reloc_notsupported;
7715 }
7716 break;
7717
7718 case R_ARM_TLS_CALL:
7719 /* GD->IE relaxation, turn the instruction into 'nop' or
7720 'ldr r0, [pc,r0]' */
7721 insn = is_local ? 0xe1a00000 : 0xe79f0000;
7722 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7723 break;
7724
7725 case R_ARM_THM_TLS_CALL:
7726 /* GD->IE relaxation */
7727 if (!is_local)
7728 /* add r0,pc; ldr r0, [r0] */
7729 insn = 0x44786800;
7730 else if (arch_has_thumb2_nop (globals))
7731 /* nop.w */
7732 insn = 0xf3af8000;
7733 else
7734 /* nop; nop */
7735 insn = 0xbf00bf00;
7736
7737 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
7738 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
7739 break;
7740 }
7741 return bfd_reloc_ok;
7742 }
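/* Summary of the rewrites above, for reference: when the symbol binds
locally (GD->LE) the ARM descriptor sequence "add rx, pc, ry" /
"ldr rx, [ry, #4]" / "blx rx" becomes "mov rx, ry" / nop / nop, and an
R_ARM_TLS_CALL site is overwritten with a nop (0xe1a00000). When the
symbol may be preempted (GD->IE) the call site becomes "ldr r0, [pc, r0]"
(0xe79f0000), the descriptor load is reduced to "ldr rx, [ry]" and the
"blx rx" is turned into "mov r0, rx". The Thumb variants follow the same
pattern with 16-bit encodings. */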
7743
7744 /* For a given value of n, calculate the value of G_n as required to
7745 deal with group relocations. We return it in the form of an
7746 encoded constant-and-rotation, together with the final residual. If n is
7747 specified as less than zero, then final_residual is filled with the
7748 input value and no further action is performed. */
7749
7750 static bfd_vma
7751 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7752 {
7753 int current_n;
7754 bfd_vma g_n;
7755 bfd_vma encoded_g_n = 0;
7756 bfd_vma residual = value; /* Also known as Y_n. */
7757
7758 for (current_n = 0; current_n <= n; current_n++)
7759 {
7760 int shift;
7761
7762 /* Calculate which part of the value to mask. */
7763 if (residual == 0)
7764 shift = 0;
7765 else
7766 {
7767 int msb;
7768
7769 /* Determine the most significant bit in the residual and
7770 align the resulting value to a 2-bit boundary. */
7771 for (msb = 30; msb >= 0; msb -= 2)
7772 if (residual & (3 << msb))
7773 break;
7774
7775 /* The desired shift is now (msb - 6), or zero, whichever
7776 is the greater. */
7777 shift = msb - 6;
7778 if (shift < 0)
7779 shift = 0;
7780 }
7781
7782 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7783 g_n = residual & (0xff << shift);
7784 encoded_g_n = (g_n >> shift)
7785 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7786
7787 /* Calculate the residual for the next time around. */
7788 residual &= ~g_n;
7789 }
7790
7791 *final_residual = residual;
7792
7793 return encoded_g_n;
7794 }
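/* Worked example (illustrative values): for value == 0x1234, the first
iteration finds the top bit pair at bit 12, giving shift == 6,
G_0 == 0x1200, encoded_g_0 == 0xd48 (imm8 0x48 with rotation field 13,
i.e. 0x48 ror 26) and a residual of 0x34; the second iteration then gives
G_1 == 0x34, encoded as 0x034 with no rotation, and a final residual
of 0. */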
7795
7796 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
7797 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
7798
7799 static int
7800 identify_add_or_sub (bfd_vma insn)
7801 {
7802 int opcode = insn & 0x1e00000;
7803
7804 if (opcode == 1 << 23) /* ADD */
7805 return 1;
7806
7807 if (opcode == 1 << 22) /* SUB */
7808 return -1;
7809
7810 return 0;
7811 }
7812
7813 /* Perform a relocation as part of a final link. */
7814
7815 static bfd_reloc_status_type
7816 elf32_arm_final_link_relocate (reloc_howto_type * howto,
7817 bfd * input_bfd,
7818 bfd * output_bfd,
7819 asection * input_section,
7820 bfd_byte * contents,
7821 Elf_Internal_Rela * rel,
7822 bfd_vma value,
7823 struct bfd_link_info * info,
7824 asection * sym_sec,
7825 const char * sym_name,
7826 unsigned char st_type,
7827 enum arm_st_branch_type branch_type,
7828 struct elf_link_hash_entry * h,
7829 bfd_boolean * unresolved_reloc_p,
7830 char ** error_message)
7831 {
7832 unsigned long r_type = howto->type;
7833 unsigned long r_symndx;
7834 bfd_byte * hit_data = contents + rel->r_offset;
7835 bfd_vma * local_got_offsets;
7836 bfd_vma * local_tlsdesc_gotents;
7837 asection * sgot;
7838 asection * splt;
7839 asection * sreloc = NULL;
7840 asection * srelgot;
7841 bfd_vma addend;
7842 bfd_signed_vma signed_addend;
7843 unsigned char dynreloc_st_type;
7844 bfd_vma dynreloc_value;
7845 struct elf32_arm_link_hash_table * globals;
7846 struct elf32_arm_link_hash_entry *eh;
7847 union gotplt_union *root_plt;
7848 struct arm_plt_info *arm_plt;
7849 bfd_vma plt_offset;
7850 bfd_vma gotplt_offset;
7851 bfd_boolean has_iplt_entry;
7852
7853 globals = elf32_arm_hash_table (info);
7854 if (globals == NULL)
7855 return bfd_reloc_notsupported;
7856
7857 BFD_ASSERT (is_arm_elf (input_bfd));
7858
7859 /* Some relocation types map to different relocations depending on the
7860 target. We pick the right one here. */
7861 r_type = arm_real_reloc_type (globals, r_type);
7862
7863 /* It is possible to have linker relaxations on some TLS access
7864 models. Update our information here. */
7865 r_type = elf32_arm_tls_transition (info, r_type, h);
7866
7867 if (r_type != howto->type)
7868 howto = elf32_arm_howto_from_type (r_type);
7869
7870 /* If the start address has been set, then set the EF_ARM_HASENTRY
7871 flag. Setting this more than once is redundant, but the cost is
7872 not too high, and it keeps the code simple.
7873
7874 The test is done here, rather than somewhere else, because the
7875 start address is only set just before the final link commences.
7876
7877 Note - if the user deliberately sets a start address of 0, the
7878 flag will not be set. */
7879 if (bfd_get_start_address (output_bfd) != 0)
7880 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
7881
7882 eh = (struct elf32_arm_link_hash_entry *) h;
7883 sgot = globals->root.sgot;
7884 local_got_offsets = elf_local_got_offsets (input_bfd);
7885 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
7886
7887 if (globals->root.dynamic_sections_created)
7888 srelgot = globals->root.srelgot;
7889 else
7890 srelgot = NULL;
7891
7892 r_symndx = ELF32_R_SYM (rel->r_info);
7893
7894 if (globals->use_rel)
7895 {
7896 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
7897
7898 if (addend & ((howto->src_mask + 1) >> 1))
7899 {
7900 signed_addend = -1;
7901 signed_addend &= ~ howto->src_mask;
7902 signed_addend |= addend;
7903 }
7904 else
7905 signed_addend = addend;
7906 }
7907 else
7908 addend = signed_addend = rel->r_addend;
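/* Example of the REL sign extension above (illustrative): for a reloc
whose src_mask is 0x00ffffff, a stored field of 0x00fffffe yields
signed_addend == -2, while 0x00000010 stays +0x10. RELA objects skip
this and take r_addend directly. */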
7909
7910 /* Record the symbol information that should be used in dynamic
7911 relocations. */
7912 dynreloc_st_type = st_type;
7913 dynreloc_value = value;
7914 if (branch_type == ST_BRANCH_TO_THUMB)
7915 dynreloc_value |= 1;
7916
7917 /* Find out whether the symbol has a PLT. Set ST_TYPE, BRANCH_TYPE and
7918 VALUE appropriately for relocations that we resolve at link time. */
7919 has_iplt_entry = FALSE;
7920 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
7921 && root_plt->offset != (bfd_vma) -1)
7922 {
7923 plt_offset = root_plt->offset;
7924 gotplt_offset = arm_plt->got_offset;
7925
7926 if (h == NULL || eh->is_iplt)
7927 {
7928 has_iplt_entry = TRUE;
7929 splt = globals->root.iplt;
7930
7931 /* Populate .iplt entries here, because not all of them will
7932 be seen by finish_dynamic_symbol. The lower bit is set if
7933 we have already populated the entry. */
7934 if (plt_offset & 1)
7935 plt_offset--;
7936 else
7937 {
7938 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
7939 -1, dynreloc_value);
7940 root_plt->offset |= 1;
7941 }
7942
7943 /* Static relocations always resolve to the .iplt entry. */
7944 st_type = STT_FUNC;
7945 value = (splt->output_section->vma
7946 + splt->output_offset
7947 + plt_offset);
7948 branch_type = ST_BRANCH_TO_ARM;
7949
7950 /* If there are non-call relocations that resolve to the .iplt
7951 entry, then all dynamic ones must too. */
7952 if (arm_plt->noncall_refcount != 0)
7953 {
7954 dynreloc_st_type = st_type;
7955 dynreloc_value = value;
7956 }
7957 }
7958 else
7959 /* We populate the .plt entry in finish_dynamic_symbol. */
7960 splt = globals->root.splt;
7961 }
7962 else
7963 {
7964 splt = NULL;
7965 plt_offset = (bfd_vma) -1;
7966 gotplt_offset = (bfd_vma) -1;
7967 }
7968
7969 switch (r_type)
7970 {
7971 case R_ARM_NONE:
7972 /* We don't need to find a value for this symbol. It's just a
7973 marker. */
7974 *unresolved_reloc_p = FALSE;
7975 return bfd_reloc_ok;
7976
7977 case R_ARM_ABS12:
7978 if (!globals->vxworks_p)
7979 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7980
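/* Fall through: on VxWorks, R_ARM_ABS12 is handled by the generic code
below like the other data relocations. */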
7981 case R_ARM_PC24:
7982 case R_ARM_ABS32:
7983 case R_ARM_ABS32_NOI:
7984 case R_ARM_REL32:
7985 case R_ARM_REL32_NOI:
7986 case R_ARM_CALL:
7987 case R_ARM_JUMP24:
7988 case R_ARM_XPC25:
7989 case R_ARM_PREL31:
7990 case R_ARM_PLT32:
7991 /* Handle relocations which should use the PLT entry. ABS32/REL32
7992 will use the symbol's value, which may point to a PLT entry, but we
7993 don't need to handle that here. If we created a PLT entry, all
7994 branches in this object should go to it, except if the PLT is too
7995 far away, in which case a long branch stub should be inserted. */
7996 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
7997 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
7998 && r_type != R_ARM_CALL
7999 && r_type != R_ARM_JUMP24
8000 && r_type != R_ARM_PLT32)
8001 && plt_offset != (bfd_vma) -1)
8002 {
8003 /* If we've created a .plt section, and assigned a PLT entry
8004 to this function, it must either be a STT_GNU_IFUNC reference
8005 or not be known to bind locally. In other cases, we should
8006 have cleared the PLT entry by now. */
8007 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8008
8009 value = (splt->output_section->vma
8010 + splt->output_offset
8011 + plt_offset);
8012 *unresolved_reloc_p = FALSE;
8013 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8014 contents, rel->r_offset, value,
8015 rel->r_addend);
8016 }
8017
8018 /* When generating a shared object or relocatable executable, these
8019 relocations are copied into the output file to be resolved at
8020 run time. */
8021 if ((info->shared || globals->root.is_relocatable_executable)
8022 && (input_section->flags & SEC_ALLOC)
8023 && !(globals->vxworks_p
8024 && strcmp (input_section->output_section->name,
8025 ".tls_vars") == 0)
8026 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8027 || !SYMBOL_CALLS_LOCAL (info, h))
8028 && (!strstr (input_section->name, STUB_SUFFIX))
8029 && (h == NULL
8030 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8031 || h->root.type != bfd_link_hash_undefweak)
8032 && r_type != R_ARM_PC24
8033 && r_type != R_ARM_CALL
8034 && r_type != R_ARM_JUMP24
8035 && r_type != R_ARM_PREL31
8036 && r_type != R_ARM_PLT32)
8037 {
8038 Elf_Internal_Rela outrel;
8039 bfd_boolean skip, relocate;
8040
8041 *unresolved_reloc_p = FALSE;
8042
8043 if (sreloc == NULL && globals->root.dynamic_sections_created)
8044 {
8045 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8046 ! globals->use_rel);
8047
8048 if (sreloc == NULL)
8049 return bfd_reloc_notsupported;
8050 }
8051
8052 skip = FALSE;
8053 relocate = FALSE;
8054
8055 outrel.r_addend = addend;
8056 outrel.r_offset =
8057 _bfd_elf_section_offset (output_bfd, info, input_section,
8058 rel->r_offset);
8059 if (outrel.r_offset == (bfd_vma) -1)
8060 skip = TRUE;
8061 else if (outrel.r_offset == (bfd_vma) -2)
8062 skip = TRUE, relocate = TRUE;
8063 outrel.r_offset += (input_section->output_section->vma
8064 + input_section->output_offset);
8065
8066 if (skip)
8067 memset (&outrel, 0, sizeof outrel);
8068 else if (h != NULL
8069 && h->dynindx != -1
8070 && (!info->shared
8071 || !info->symbolic
8072 || !h->def_regular))
8073 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8074 else
8075 {
8076 int symbol;
8077
8078 /* This symbol is local, or marked to become local. */
8079 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8080 if (globals->symbian_p)
8081 {
8082 asection *osec;
8083
8084 /* On Symbian OS, the data segment and text segment
8085 can be relocated independently. Therefore, we
8086 must indicate the segment to which this
8087 relocation is relative. The BPABI allows us to
8088 use any symbol in the right segment; we just use
8089 the section symbol as it is convenient. (We
8090 cannot use the symbol given by "h" directly as it
8091 will not appear in the dynamic symbol table.)
8092
8093 Note that the dynamic linker ignores the section
8094 symbol value, so we don't subtract osec->vma
8095 from the emitted reloc addend. */
8096 if (sym_sec)
8097 osec = sym_sec->output_section;
8098 else
8099 osec = input_section->output_section;
8100 symbol = elf_section_data (osec)->dynindx;
8101 if (symbol == 0)
8102 {
8103 struct elf_link_hash_table *htab = elf_hash_table (info);
8104
8105 if ((osec->flags & SEC_READONLY) == 0
8106 && htab->data_index_section != NULL)
8107 osec = htab->data_index_section;
8108 else
8109 osec = htab->text_index_section;
8110 symbol = elf_section_data (osec)->dynindx;
8111 }
8112 BFD_ASSERT (symbol != 0);
8113 }
8114 else
8115 /* On SVR4-ish systems, the dynamic loader cannot
8116 relocate the text and data segments independently,
8117 so the symbol does not matter. */
8118 symbol = 0;
8119 if (dynreloc_st_type == STT_GNU_IFUNC)
8120 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8121 to the .iplt entry. Instead, every non-call reference
8122 must use an R_ARM_IRELATIVE relocation to obtain the
8123 correct run-time address. */
8124 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8125 else
8126 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8127 if (globals->use_rel)
8128 relocate = TRUE;
8129 else
8130 outrel.r_addend += dynreloc_value;
8131 }
8132
8133 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8134
8135 /* If this reloc is against an external symbol, we do not want to
8136 fiddle with the addend. Otherwise, we need to include the symbol
8137 value so that it becomes an addend for the dynamic reloc. */
8138 if (! relocate)
8139 return bfd_reloc_ok;
8140
8141 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8142 contents, rel->r_offset,
8143 dynreloc_value, (bfd_vma) 0);
8144 }
8145 else switch (r_type)
8146 {
8147 case R_ARM_ABS12:
8148 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8149
8150 case R_ARM_XPC25: /* Arm BLX instruction. */
8151 case R_ARM_CALL:
8152 case R_ARM_JUMP24:
8153 case R_ARM_PC24: /* Arm B/BL instruction. */
8154 case R_ARM_PLT32:
8155 {
8156 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8157
8158 if (r_type == R_ARM_XPC25)
8159 {
8160 /* Check for Arm calling Arm function. */
8161 /* FIXME: Should we translate the instruction into a BL
8162 instruction instead? */
8163 if (branch_type != ST_BRANCH_TO_THUMB)
8164 (*_bfd_error_handler)
8165 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8166 input_bfd,
8167 h ? h->root.root.string : "(local)");
8168 }
8169 else if (r_type == R_ARM_PC24)
8170 {
8171 /* Check for Arm calling Thumb function. */
8172 if (branch_type == ST_BRANCH_TO_THUMB)
8173 {
8174 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8175 output_bfd, input_section,
8176 hit_data, sym_sec, rel->r_offset,
8177 signed_addend, value,
8178 error_message))
8179 return bfd_reloc_ok;
8180 else
8181 return bfd_reloc_dangerous;
8182 }
8183 }
8184
8185 /* Check if a stub has to be inserted because the
8186 destination is too far or we are changing mode. */
8187 if ( r_type == R_ARM_CALL
8188 || r_type == R_ARM_JUMP24
8189 || r_type == R_ARM_PLT32)
8190 {
8191 enum elf32_arm_stub_type stub_type = arm_stub_none;
8192 struct elf32_arm_link_hash_entry *hash;
8193
8194 hash = (struct elf32_arm_link_hash_entry *) h;
8195 stub_type = arm_type_of_stub (info, input_section, rel,
8196 st_type, &branch_type,
8197 hash, value, sym_sec,
8198 input_bfd, sym_name);
8199
8200 if (stub_type != arm_stub_none)
8201 {
8202 /* The target is out of reach, so redirect the
8203 branch to the local stub for this function. */
8204
8205 stub_entry = elf32_arm_get_stub_entry (input_section,
8206 sym_sec, h,
8207 rel, globals,
8208 stub_type);
8209 if (stub_entry != NULL)
8210 value = (stub_entry->stub_offset
8211 + stub_entry->stub_sec->output_offset
8212 + stub_entry->stub_sec->output_section->vma);
8213 }
8214 else
8215 {
8216 /* If the call goes through a PLT entry, make sure to
8217 check distance to the right destination address. */
8218 if (plt_offset != (bfd_vma) -1)
8219 {
8220 value = (splt->output_section->vma
8221 + splt->output_offset
8222 + plt_offset);
8223 *unresolved_reloc_p = FALSE;
8224 /* The PLT entry is in ARM mode, regardless of the
8225 target function. */
8226 branch_type = ST_BRANCH_TO_ARM;
8227 }
8228 }
8229 }
8230
8231 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8232 where:
8233 S is the address of the symbol in the relocation.
8234 P is address of the instruction being relocated.
8235 A is the addend (extracted from the instruction) in bytes.
8236
8237 S is held in 'value'.
8238 P is the base address of the section containing the
8239 instruction plus the offset of the reloc into that
8240 section, ie:
8241 (input_section->output_section->vma +
8242 input_section->output_offset +
8243 rel->r_offset).
8244 A is the addend, converted into bytes, ie:
8245 (signed_addend * 4)
8246
8247 Note: None of these operations have knowledge of the pipeline
8248 size of the processor, thus it is up to the assembler to
8249 encode this information into the addend. */
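/* Worked example (illustrative addresses): for an R_ARM_CALL in a REL
object with S == 0x8100, P == 0x8000 and a stored field of -2
(i.e. A == -8 bytes), the computation below gives 0xf8, which after the
right shift becomes the 24-bit field 0x3e; at run time the CPU adds
(0x3e << 2) to P + 8 and lands back on 0x8100. */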
8250 value -= (input_section->output_section->vma
8251 + input_section->output_offset);
8252 value -= rel->r_offset;
8253 if (globals->use_rel)
8254 value += (signed_addend << howto->size);
8255 else
8256 /* RELA addends do not have to be adjusted by howto->size. */
8257 value += signed_addend;
8258
8259 signed_addend = value;
8260 signed_addend >>= howto->rightshift;
8261
8262 /* A branch to an undefined weak symbol is turned into a jump to
8263 the next instruction unless a PLT entry will be created.
8264 Do the same for local undefined symbols (but not for STN_UNDEF).
8265 The jump to the next instruction is optimized as a NOP depending
8266 on the architecture. */
8267 if (h ? (h->root.type == bfd_link_hash_undefweak
8268 && plt_offset == (bfd_vma) -1)
8269 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8270 {
8271 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8272
8273 if (arch_has_arm_nop (globals))
8274 value |= 0x0320f000;
8275 else
8276 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8277 }
8278 else
8279 {
8280 /* Perform a signed range check. */
8281 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8282 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8283 return bfd_reloc_overflow;
8284
8285 addend = (value & 2);
8286
8287 value = (signed_addend & howto->dst_mask)
8288 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8289
8290 if (r_type == R_ARM_CALL)
8291 {
8292 /* Set the H bit in the BLX instruction. */
8293 if (branch_type == ST_BRANCH_TO_THUMB)
8294 {
8295 if (addend)
8296 value |= (1 << 24);
8297 else
8298 value &= ~(bfd_vma)(1 << 24);
8299 }
8300
8301 /* Select the correct instruction (BL or BLX). */
8302 /* Only if we are not handling a BL to a stub. In this
8303 case, mode switching is performed by the stub. */
8304 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8305 value |= (1 << 28);
8306 else
8307 {
8308 value &= ~(bfd_vma)(1 << 28);
8309 value |= (1 << 24);
8310 }
8311 }
8312 }
8313 }
8314 break;
8315
8316 case R_ARM_ABS32:
8317 value += addend;
8318 if (branch_type == ST_BRANCH_TO_THUMB)
8319 value |= 1;
8320 break;
8321
8322 case R_ARM_ABS32_NOI:
8323 value += addend;
8324 break;
8325
8326 case R_ARM_REL32:
8327 value += addend;
8328 if (branch_type == ST_BRANCH_TO_THUMB)
8329 value |= 1;
8330 value -= (input_section->output_section->vma
8331 + input_section->output_offset + rel->r_offset);
8332 break;
8333
8334 case R_ARM_REL32_NOI:
8335 value += addend;
8336 value -= (input_section->output_section->vma
8337 + input_section->output_offset + rel->r_offset);
8338 break;
8339
8340 case R_ARM_PREL31:
8341 value -= (input_section->output_section->vma
8342 + input_section->output_offset + rel->r_offset);
8343 value += signed_addend;
8344 if (! h || h->root.type != bfd_link_hash_undefweak)
8345 {
8346 /* Check for overflow. */
8347 if ((value ^ (value >> 1)) & (1 << 30))
8348 return bfd_reloc_overflow;
8349 }
8350 value &= 0x7fffffff;
8351 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8352 if (branch_type == ST_BRANCH_TO_THUMB)
8353 value |= 1;
8354 break;
8355 }
8356
8357 bfd_put_32 (input_bfd, value, hit_data);
8358 return bfd_reloc_ok;
8359
8360 case R_ARM_ABS8:
8361 value += addend;
8362
8363 /* There is no way to tell whether the user intended to use a signed or
8364 unsigned addend. When checking for overflow we accept either,
8365 as specified by the AAELF. */
8366 if ((long) value > 0xff || (long) value < -0x80)
8367 return bfd_reloc_overflow;
8368
8369 bfd_put_8 (input_bfd, value, hit_data);
8370 return bfd_reloc_ok;
8371
8372 case R_ARM_ABS16:
8373 value += addend;
8374
8375 /* See comment for R_ARM_ABS8. */
8376 if ((long) value > 0xffff || (long) value < -0x8000)
8377 return bfd_reloc_overflow;
8378
8379 bfd_put_16 (input_bfd, value, hit_data);
8380 return bfd_reloc_ok;
8381
8382 case R_ARM_THM_ABS5:
8383 /* Support ldr and str instructions for the thumb. */
8384 if (globals->use_rel)
8385 {
8386 /* Need to refetch addend. */
8387 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8388 /* ??? Need to determine shift amount from operand size. */
8389 addend >>= howto->rightshift;
8390 }
8391 value += addend;
8392
8393 /* ??? Isn't value unsigned? */
8394 if ((long) value > 0x1f || (long) value < -0x10)
8395 return bfd_reloc_overflow;
8396
8397 /* ??? Value needs to be properly shifted into place first. */
8398 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8399 bfd_put_16 (input_bfd, value, hit_data);
8400 return bfd_reloc_ok;
8401
8402 case R_ARM_THM_ALU_PREL_11_0:
8403 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8404 {
8405 bfd_vma insn;
8406 bfd_signed_vma relocation;
8407
8408 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8409 | bfd_get_16 (input_bfd, hit_data + 2);
8410
8411 if (globals->use_rel)
8412 {
8413 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8414 | ((insn & (1 << 26)) >> 15);
8415 if (insn & 0xf00000)
8416 signed_addend = -signed_addend;
8417 }
8418
8419 relocation = value + signed_addend;
8420 relocation -= (input_section->output_section->vma
8421 + input_section->output_offset
8422 + rel->r_offset);
8423
8424 value = abs (relocation);
8425
8426 if (value >= 0x1000)
8427 return bfd_reloc_overflow;
8428
8429 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8430 | ((value & 0x700) << 4)
8431 | ((value & 0x800) << 15);
8432 if (relocation < 0)
8433 insn |= 0xa00000;
8434
8435 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8436 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8437
8438 return bfd_reloc_ok;
8439 }
8440
8441 case R_ARM_THM_PC8:
8442 /* PR 10073: This reloc is not generated by the GNU toolchain,
8443 but it is supported for compatibility with third party libraries
8444 generated by other compilers, specifically the ARM/IAR. */
8445 {
8446 bfd_vma insn;
8447 bfd_signed_vma relocation;
8448
8449 insn = bfd_get_16 (input_bfd, hit_data);
8450
8451 if (globals->use_rel)
8452 addend = (insn & 0x00ff) << 2;
8453
8454 relocation = value + addend;
8455 relocation -= (input_section->output_section->vma
8456 + input_section->output_offset
8457 + rel->r_offset);
8458
8459 value = abs (relocation);
8460
8461 /* We do not check for overflow of this reloc. Although strictly
8462 speaking this is incorrect, it appears to be necessary in order
8463 to work with IAR generated relocs. Since GCC and GAS do not
8464 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8465 a problem for them. */
8466 value &= 0x3fc;
8467
8468 insn = (insn & 0xff00) | (value >> 2);
8469
8470 bfd_put_16 (input_bfd, insn, hit_data);
8471
8472 return bfd_reloc_ok;
8473 }
8474
8475 case R_ARM_THM_PC12:
8476 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8477 {
8478 bfd_vma insn;
8479 bfd_signed_vma relocation;
8480
8481 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8482 | bfd_get_16 (input_bfd, hit_data + 2);
8483
8484 if (globals->use_rel)
8485 {
8486 signed_addend = insn & 0xfff;
8487 if (!(insn & (1 << 23)))
8488 signed_addend = -signed_addend;
8489 }
8490
8491 relocation = value + signed_addend;
8492 relocation -= (input_section->output_section->vma
8493 + input_section->output_offset
8494 + rel->r_offset);
8495
8496 value = abs (relocation);
8497
8498 if (value >= 0x1000)
8499 return bfd_reloc_overflow;
8500
8501 insn = (insn & 0xff7ff000) | value;
8502 if (relocation >= 0)
8503 insn |= (1 << 23);
8504
8505 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8506 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8507
8508 return bfd_reloc_ok;
8509 }
8510
8511 case R_ARM_THM_XPC22:
8512 case R_ARM_THM_CALL:
8513 case R_ARM_THM_JUMP24:
8514 /* Thumb BL (branch long instruction). */
8515 {
8516 bfd_vma relocation;
8517 bfd_vma reloc_sign;
8518 bfd_boolean overflow = FALSE;
8519 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8520 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8521 bfd_signed_vma reloc_signed_max;
8522 bfd_signed_vma reloc_signed_min;
8523 bfd_vma check;
8524 bfd_signed_vma signed_check;
8525 int bitsize;
8526 const int thumb2 = using_thumb2 (globals);
8527
8528 /* A branch to an undefined weak symbol is turned into a jump to
8529 the next instruction unless a PLT entry will be created.
8530 The jump to the next instruction is optimized as a NOP.W for
8531 Thumb-2 enabled architectures. */
8532 if (h && h->root.type == bfd_link_hash_undefweak
8533 && plt_offset == (bfd_vma) -1)
8534 {
8535 if (arch_has_thumb2_nop (globals))
8536 {
8537 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8538 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8539 }
8540 else
8541 {
8542 bfd_put_16 (input_bfd, 0xe000, hit_data);
8543 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8544 }
8545 return bfd_reloc_ok;
8546 }
8547
8548 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8549 with Thumb-1) involving the J1 and J2 bits. */
8550 if (globals->use_rel)
8551 {
8552 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8553 bfd_vma upper = upper_insn & 0x3ff;
8554 bfd_vma lower = lower_insn & 0x7ff;
8555 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8556 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8557 bfd_vma i1 = j1 ^ s ? 0 : 1;
8558 bfd_vma i2 = j2 ^ s ? 0 : 1;
8559
8560 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8561 /* Sign extend. */
8562 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8563
8564 signed_addend = addend;
8565 }
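/* Decoding example (illustrative): the encoding 0xf7ff 0xfffe ("bl" back
to itself) has S == 1 and J1 == J2 == 1, so I1 == I2 == 1 and the code
above reconstructs signed_addend == -4, which exactly cancels the CPU's
PC + 4 read-ahead for Thumb. */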
8566
8567 if (r_type == R_ARM_THM_XPC22)
8568 {
8569 /* Check for Thumb to Thumb call. */
8570 /* FIXME: Should we translate the instruction into a BL
8571 instruction instead? */
8572 if (branch_type == ST_BRANCH_TO_THUMB)
8573 (*_bfd_error_handler)
8574 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8575 input_bfd,
8576 h ? h->root.root.string : "(local)");
8577 }
8578 else
8579 {
8580 /* If it is not a call to Thumb, assume call to Arm.
8581 If it is a call relative to a section name, then it is not a
8582 function call at all, but rather a long jump. Calls through
8583 the PLT do not require stubs. */
8584 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8585 {
8586 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8587 {
8588 /* Convert BL to BLX. */
8589 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8590 }
8591 else if (( r_type != R_ARM_THM_CALL)
8592 && (r_type != R_ARM_THM_JUMP24))
8593 {
8594 if (elf32_thumb_to_arm_stub
8595 (info, sym_name, input_bfd, output_bfd, input_section,
8596 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8597 error_message))
8598 return bfd_reloc_ok;
8599 else
8600 return bfd_reloc_dangerous;
8601 }
8602 }
8603 else if (branch_type == ST_BRANCH_TO_THUMB
8604 && globals->use_blx
8605 && r_type == R_ARM_THM_CALL)
8606 {
8607 /* Make sure this is a BL. */
8608 lower_insn |= 0x1800;
8609 }
8610 }
8611
8612 enum elf32_arm_stub_type stub_type = arm_stub_none;
8613 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8614 {
8615 /* Check if a stub has to be inserted because the destination
8616 is too far. */
8617 struct elf32_arm_stub_hash_entry *stub_entry;
8618 struct elf32_arm_link_hash_entry *hash;
8619
8620 hash = (struct elf32_arm_link_hash_entry *) h;
8621
8622 stub_type = arm_type_of_stub (info, input_section, rel,
8623 st_type, &branch_type,
8624 hash, value, sym_sec,
8625 input_bfd, sym_name);
8626
8627 if (stub_type != arm_stub_none)
8628 {
8629 /* The target is out of reach or we are changing modes, so
8630 redirect the branch to the local stub for this
8631 function. */
8632 stub_entry = elf32_arm_get_stub_entry (input_section,
8633 sym_sec, h,
8634 rel, globals,
8635 stub_type);
8636 if (stub_entry != NULL)
8637 value = (stub_entry->stub_offset
8638 + stub_entry->stub_sec->output_offset
8639 + stub_entry->stub_sec->output_section->vma);
8640
8641 /* If this call becomes a call to Arm, force BLX. */
8642 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8643 {
8644 if ((stub_entry
8645 && !arm_stub_is_thumb (stub_entry->stub_type))
8646 || branch_type != ST_BRANCH_TO_THUMB)
8647 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8648 }
8649 }
8650 }
8651
8652 /* Handle calls via the PLT. */
8653 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8654 {
8655 value = (splt->output_section->vma
8656 + splt->output_offset
8657 + plt_offset);
8658
8659 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8660 {
8661 /* If the Thumb BLX instruction is available, convert
8662 the BL to a BLX instruction to call the ARM-mode
8663 PLT entry. */
8664 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8665 branch_type = ST_BRANCH_TO_ARM;
8666 }
8667 else
8668 {
8669 /* Target the Thumb stub before the ARM PLT entry. */
8670 value -= PLT_THUMB_STUB_SIZE;
8671 branch_type = ST_BRANCH_TO_THUMB;
8672 }
8673 *unresolved_reloc_p = FALSE;
8674 }
8675
8676 relocation = value + signed_addend;
8677
8678 relocation -= (input_section->output_section->vma
8679 + input_section->output_offset
8680 + rel->r_offset);
8681
8682 check = relocation >> howto->rightshift;
8683
8684 /* If this is a signed value, the rightshift just dropped
8685 leading 1 bits (assuming twos complement). */
8686 if ((bfd_signed_vma) relocation >= 0)
8687 signed_check = check;
8688 else
8689 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8690
8691 /* Calculate the permissible maximum and minimum values for
8692 this relocation according to whether we're relocating for
8693 Thumb-2 or not. */
8694 bitsize = howto->bitsize;
8695 if (!thumb2)
8696 bitsize -= 2;
8697 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8698 reloc_signed_min = ~reloc_signed_max;
8699
8700 /* Assumes two's complement. */
8701 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8702 overflow = TRUE;
8703
8704 if ((lower_insn & 0x5000) == 0x4000)
8705 /* For a BLX instruction, make sure that the relocation is rounded up
8706 to a word boundary. This follows the semantics of the instruction
8707 which specifies that bit 1 of the target address will come from bit
8708 1 of the base address. */
8709 relocation = (relocation + 2) & ~ 3;
8710
8711 /* Put RELOCATION back into the insn. Assumes two's complement.
8712 We use the Thumb-2 encoding, which is safe even if dealing with
8713 a Thumb-1 instruction by virtue of our overflow check above. */
8714 reloc_sign = (signed_check < 0) ? 1 : 0;
8715 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8716 | ((relocation >> 12) & 0x3ff)
8717 | (reloc_sign << 10);
8718 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8719 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8720 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8721 | ((relocation >> 1) & 0x7ff);
8722
8723 /* Put the relocated value back in the object file: */
8724 bfd_put_16 (input_bfd, upper_insn, hit_data);
8725 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8726
8727 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8728 }
8729 break;
8730
8731 case R_ARM_THM_JUMP19:
8732 /* Thumb32 conditional branch instruction. */
8733 {
8734 bfd_vma relocation;
8735 bfd_boolean overflow = FALSE;
8736 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8737 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8738 bfd_signed_vma reloc_signed_max = 0xffffe;
8739 bfd_signed_vma reloc_signed_min = -0x100000;
8740 bfd_signed_vma signed_check;
8741
8742 /* Need to refetch the addend, reconstruct the top three bits,
8743 and squish the two 11 bit pieces together. */
8744 if (globals->use_rel)
8745 {
8746 bfd_vma S = (upper_insn & 0x0400) >> 10;
8747 bfd_vma upper = (upper_insn & 0x003f);
8748 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8749 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8750 bfd_vma lower = (lower_insn & 0x07ff);
8751
8752 upper |= J1 << 6;
8753 upper |= J2 << 7;
8754 upper |= (!S) << 8;
8755 upper -= 0x0100; /* Sign extend. */
8756
8757 addend = (upper << 12) | (lower << 1);
8758 signed_addend = addend;
8759 }
8760
8761 /* Handle calls via the PLT. */
8762 if (plt_offset != (bfd_vma) -1)
8763 {
8764 value = (splt->output_section->vma
8765 + splt->output_offset
8766 + plt_offset);
8767 /* Target the Thumb stub before the ARM PLT entry. */
8768 value -= PLT_THUMB_STUB_SIZE;
8769 *unresolved_reloc_p = FALSE;
8770 }
8771
8772 /* ??? Should handle interworking? GCC might someday try to
8773 use this for tail calls. */
8774
8775 relocation = value + signed_addend;
8776 relocation -= (input_section->output_section->vma
8777 + input_section->output_offset
8778 + rel->r_offset);
8779 signed_check = (bfd_signed_vma) relocation;
8780
8781 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8782 overflow = TRUE;
8783
8784 /* Put RELOCATION back into the insn. */
8785 {
8786 bfd_vma S = (relocation & 0x00100000) >> 20;
8787 bfd_vma J2 = (relocation & 0x00080000) >> 19;
8788 bfd_vma J1 = (relocation & 0x00040000) >> 18;
8789 bfd_vma hi = (relocation & 0x0003f000) >> 12;
8790 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
8791
8792 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
8793 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
8794 }
8795
8796 /* Put the relocated value back in the object file: */
8797 bfd_put_16 (input_bfd, upper_insn, hit_data);
8798 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8799
8800 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8801 }
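/* The limits above (-0x100000 .. 0xffffe) match the reach of a Thumb-2
conditional branch, B<cond>.W: a 21-bit signed offset with bit 0 always
zero, i.e. roughly +/-1 MiB around the branch. */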
8802
8803 case R_ARM_THM_JUMP11:
8804 case R_ARM_THM_JUMP8:
8805 case R_ARM_THM_JUMP6:
8806 /* Thumb B (branch) instruction. */
8807 {
8808 bfd_signed_vma relocation;
8809 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
8810 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
8811 bfd_signed_vma signed_check;
8812
8813 /* CBZ cannot jump backward. */
8814 if (r_type == R_ARM_THM_JUMP6)
8815 reloc_signed_min = 0;
8816
8817 if (globals->use_rel)
8818 {
8819 /* Need to refetch addend. */
8820 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8821 if (addend & ((howto->src_mask + 1) >> 1))
8822 {
8823 signed_addend = -1;
8824 signed_addend &= ~ howto->src_mask;
8825 signed_addend |= addend;
8826 }
8827 else
8828 signed_addend = addend;
8829 /* The value in the insn has been right shifted. We need to
8830 undo this, so that we can perform the address calculation
8831 in terms of bytes. */
8832 signed_addend <<= howto->rightshift;
8833 }
8834 relocation = value + signed_addend;
8835
8836 relocation -= (input_section->output_section->vma
8837 + input_section->output_offset
8838 + rel->r_offset);
8839
8840 relocation >>= howto->rightshift;
8841 signed_check = relocation;
8842
8843 if (r_type == R_ARM_THM_JUMP6)
8844 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
8845 else
8846 relocation &= howto->dst_mask;
8847 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
8848
8849 bfd_put_16 (input_bfd, relocation, hit_data);
8850
8851 /* Assumes two's complement. */
8852 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8853 return bfd_reloc_overflow;
8854
8855 return bfd_reloc_ok;
8856 }
8857
8858 case R_ARM_ALU_PCREL7_0:
8859 case R_ARM_ALU_PCREL15_8:
8860 case R_ARM_ALU_PCREL23_15:
8861 {
8862 bfd_vma insn;
8863 bfd_vma relocation;
8864
8865 insn = bfd_get_32 (input_bfd, hit_data);
8866 if (globals->use_rel)
8867 {
8868 /* Extract the addend. */
8869 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
8870 signed_addend = addend;
8871 }
8872 relocation = value + signed_addend;
8873
8874 relocation -= (input_section->output_section->vma
8875 + input_section->output_offset
8876 + rel->r_offset);
8877 insn = (insn & ~0xfff)
8878 | ((howto->bitpos << 7) & 0xf00)
8879 | ((relocation >> howto->bitpos) & 0xff);
8880 bfd_put_32 (input_bfd, insn, hit_data);
8881 }
8882 return bfd_reloc_ok;
8883
8884 case R_ARM_GNU_VTINHERIT:
8885 case R_ARM_GNU_VTENTRY:
8886 return bfd_reloc_ok;
8887
8888 case R_ARM_GOTOFF32:
8889 /* Relocation is relative to the start of the
8890 global offset table. */
8891
8892 BFD_ASSERT (sgot != NULL);
8893 if (sgot == NULL)
8894 return bfd_reloc_notsupported;
8895
8896 /* If we are addressing a Thumb function, we need to adjust the
8897 address by one, so that attempts to call the function pointer will
8898 correctly interpret it as Thumb code. */
8899 if (branch_type == ST_BRANCH_TO_THUMB)
8900 value += 1;
8901
8902 /* Note that sgot->output_offset is not involved in this
8903 calculation. We always want the start of .got. If we
8904 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
8905 permitted by the ABI, we might have to change this
8906 calculation. */
8907 value -= sgot->output_section->vma;
8908 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8909 contents, rel->r_offset, value,
8910 rel->r_addend);
8911
8912 case R_ARM_GOTPC:
8913 /* Use global offset table as symbol value. */
8914 BFD_ASSERT (sgot != NULL);
8915
8916 if (sgot == NULL)
8917 return bfd_reloc_notsupported;
8918
8919 *unresolved_reloc_p = FALSE;
8920 value = sgot->output_section->vma;
8921 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8922 contents, rel->r_offset, value,
8923 rel->r_addend);
8924
8925 case R_ARM_GOT32:
8926 case R_ARM_GOT_PREL:
8927 /* Relocation is to the entry for this symbol in the
8928 global offset table. */
8929 if (sgot == NULL)
8930 return bfd_reloc_notsupported;
8931
8932 if (dynreloc_st_type == STT_GNU_IFUNC
8933 && plt_offset != (bfd_vma) -1
8934 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
8935 {
8936 /* We have a relocation against a locally-binding STT_GNU_IFUNC
8937 symbol, and the relocation resolves directly to the runtime
8938 target rather than to the .iplt entry. This means that any
8939 .got entry would be the same value as the .igot.plt entry,
8940 so there's no point creating both. */
8941 sgot = globals->root.igotplt;
8942 value = sgot->output_offset + gotplt_offset;
8943 }
8944 else if (h != NULL)
8945 {
8946 bfd_vma off;
8947
8948 off = h->got.offset;
8949 BFD_ASSERT (off != (bfd_vma) -1);
8950 if ((off & 1) != 0)
8951 {
8952 /* We have already processed one GOT relocation against
8953 this symbol. */
8954 off &= ~1;
8955 if (globals->root.dynamic_sections_created
8956 && !SYMBOL_REFERENCES_LOCAL (info, h))
8957 *unresolved_reloc_p = FALSE;
8958 }
8959 else
8960 {
8961 Elf_Internal_Rela outrel;
8962
8963 if (!SYMBOL_REFERENCES_LOCAL (info, h))
8964 {
8965 /* If the symbol doesn't resolve locally in a static
8966 object, we have an undefined reference. If the
8967 symbol doesn't resolve locally in a dynamic object,
8968 it should be resolved by the dynamic linker. */
8969 if (globals->root.dynamic_sections_created)
8970 {
8971 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
8972 *unresolved_reloc_p = FALSE;
8973 }
8974 else
8975 outrel.r_info = 0;
8976 outrel.r_addend = 0;
8977 }
8978 else
8979 {
8980 if (dynreloc_st_type == STT_GNU_IFUNC)
8981 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
8982 else if (info->shared)
8983 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
8984 else
8985 outrel.r_info = 0;
8986 outrel.r_addend = dynreloc_value;
8987 }
8988
8989 /* The GOT entry is initialized to zero by default.
8990 See if we should install a different value. */
8991 if (outrel.r_addend != 0
8992 && (outrel.r_info == 0 || globals->use_rel))
8993 {
8994 bfd_put_32 (output_bfd, outrel.r_addend,
8995 sgot->contents + off);
8996 outrel.r_addend = 0;
8997 }
8998
8999 if (outrel.r_info != 0)
9000 {
9001 outrel.r_offset = (sgot->output_section->vma
9002 + sgot->output_offset
9003 + off);
9004 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9005 }
9006 h->got.offset |= 1;
9007 }
9008 value = sgot->output_offset + off;
9009 }
9010 else
9011 {
9012 bfd_vma off;
9013
9014 BFD_ASSERT (local_got_offsets != NULL &&
9015 local_got_offsets[r_symndx] != (bfd_vma) -1);
9016
9017 off = local_got_offsets[r_symndx];
9018
9019 /* The offset must always be a multiple of 4. We use the
9020 least significant bit to record whether we have already
9021 generated the necessary reloc. */
9022 if ((off & 1) != 0)
9023 off &= ~1;
9024 else
9025 {
9026 if (globals->use_rel)
9027 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9028
9029 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9030 {
9031 Elf_Internal_Rela outrel;
9032
9033 outrel.r_addend = addend + dynreloc_value;
9034 outrel.r_offset = (sgot->output_section->vma
9035 + sgot->output_offset
9036 + off);
9037 if (dynreloc_st_type == STT_GNU_IFUNC)
9038 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9039 else
9040 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9041 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9042 }
9043
9044 local_got_offsets[r_symndx] |= 1;
9045 }
9046
9047 value = sgot->output_offset + off;
9048 }
9049 if (r_type != R_ARM_GOT32)
9050 value += sgot->output_section->vma;
9051
9052 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9053 contents, rel->r_offset, value,
9054 rel->r_addend);
9055
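/* Editorial sketch (guarded out, never built): the GOT cases above reuse
   the least significant bit of a recorded GOT offset as an "already
   processed" flag, which works because GOT entries are 4-byte aligned.
   The helper name below is ours, not part of BFD.  */
#if 0
static bfd_boolean
got_offset_already_done (bfd_vma *poffset)
{
  if ((*poffset & 1) != 0)
    {
      /* The entry was initialized on an earlier pass; strip the tag bit
	 to recover the real offset.  */
      *poffset &= ~(bfd_vma) 1;
      return TRUE;
    }
  /* First use: the caller initializes the entry and then sets bit 0
     (off |= 1, as done above) to record that fact.  */
  return FALSE;
}
#endif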
9056 case R_ARM_TLS_LDO32:
9057 value = value - dtpoff_base (info);
9058
9059 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9060 contents, rel->r_offset, value,
9061 rel->r_addend);
9062
9063 case R_ARM_TLS_LDM32:
9064 {
9065 bfd_vma off;
9066
9067 if (sgot == NULL)
9068 abort ();
9069
9070 off = globals->tls_ldm_got.offset;
9071
9072 if ((off & 1) != 0)
9073 off &= ~1;
9074 else
9075 {
9076 /* If we don't know the module number, create a relocation
9077 for it. */
9078 if (info->shared)
9079 {
9080 Elf_Internal_Rela outrel;
9081
9082 if (srelgot == NULL)
9083 abort ();
9084
9085 outrel.r_addend = 0;
9086 outrel.r_offset = (sgot->output_section->vma
9087 + sgot->output_offset + off);
9088 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9089
9090 if (globals->use_rel)
9091 bfd_put_32 (output_bfd, outrel.r_addend,
9092 sgot->contents + off);
9093
9094 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9095 }
9096 else
9097 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9098
9099 globals->tls_ldm_got.offset |= 1;
9100 }
9101
9102 value = sgot->output_section->vma + sgot->output_offset + off
9103 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9104
9105 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9106 contents, rel->r_offset, value,
9107 rel->r_addend);
9108 }
9109
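/* For reference only (guarded out): the shape of the TLS GOT entries
   built by the R_ARM_TLS_LDM32 case above and the GD/IE cases below
   (for LDM the second word is simply zero).  The linker writes raw
   words, not these structs; the struct names are ours.  */
#if 0
struct tls_gd_got_entry
{
  bfd_vma module;	/* R_ARM_TLS_DTPMOD32, or 1 for the executable.  */
  bfd_vma dtpoff;	/* R_ARM_TLS_DTPOFF32, or value - dtpoff_base.  */
};

struct tls_ie_got_entry
{
  bfd_vma tpoff;	/* R_ARM_TLS_TPOFF32, or tpoff (info, value).  */
};
#endif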
9110 case R_ARM_TLS_CALL:
9111 case R_ARM_THM_TLS_CALL:
9112 case R_ARM_TLS_GD32:
9113 case R_ARM_TLS_IE32:
9114 case R_ARM_TLS_GOTDESC:
9115 case R_ARM_TLS_DESCSEQ:
9116 case R_ARM_THM_TLS_DESCSEQ:
9117 {
9118 bfd_vma off, offplt;
9119 int indx = 0;
9120 char tls_type;
9121
9122 BFD_ASSERT (sgot != NULL);
9123
9124 if (h != NULL)
9125 {
9126 bfd_boolean dyn;
9127 dyn = globals->root.dynamic_sections_created;
9128 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9129 && (!info->shared
9130 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9131 {
9132 *unresolved_reloc_p = FALSE;
9133 indx = h->dynindx;
9134 }
9135 off = h->got.offset;
9136 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9137 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9138 }
9139 else
9140 {
9141 BFD_ASSERT (local_got_offsets != NULL);
9142 off = local_got_offsets[r_symndx];
9143 offplt = local_tlsdesc_gotents[r_symndx];
9144 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9145 }
9146
9147 /* Linker relaxation happens from one of the
9148 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9149 if (ELF32_R_TYPE(rel->r_info) != r_type)
9150 tls_type = GOT_TLS_IE;
9151
9152 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9153
9154 if ((off & 1) != 0)
9155 off &= ~1;
9156 else
9157 {
9158 bfd_boolean need_relocs = FALSE;
9159 Elf_Internal_Rela outrel;
9160 int cur_off = off;
9161
9162 /* The GOT entries have not been initialized yet. Do it
9163 now, and emit any relocations. If both an IE GOT and a
9164 GD GOT are necessary, we emit the GD first. */
9165
9166 if ((info->shared || indx != 0)
9167 && (h == NULL
9168 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9169 || h->root.type != bfd_link_hash_undefweak))
9170 {
9171 need_relocs = TRUE;
9172 BFD_ASSERT (srelgot != NULL);
9173 }
9174
9175 if (tls_type & GOT_TLS_GDESC)
9176 {
9177 bfd_byte *loc;
9178
9179 /* We should have relaxed, unless this is an undefined
9180 weak symbol. */
9181 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9182 || info->shared);
9183 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9184 <= globals->root.sgotplt->size);
9185
9186 outrel.r_addend = 0;
9187 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9188 + globals->root.sgotplt->output_offset
9189 + offplt
9190 + globals->sgotplt_jump_table_size);
9191
9192 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9193 sreloc = globals->root.srelplt;
9194 loc = sreloc->contents;
9195 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9196 BFD_ASSERT (loc + RELOC_SIZE (globals)
9197 <= sreloc->contents + sreloc->size);
9198
9199 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9200
9201 /* For globals, the first word of the descriptor's GOT slot
9202 gets the symbol's dynamic relocation index with the top
9203 bit set, or zero if we're binding now. For locals, it
9204 gets the symbol's offset in the tls section. */
9205 bfd_put_32 (output_bfd,
9206 !h ? value - elf_hash_table (info)->tls_sec->vma
9207 : info->flags & DF_BIND_NOW ? 0
9208 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9209 globals->root.sgotplt->contents + offplt +
9210 globals->sgotplt_jump_table_size);
9211
9212 /* Second word in the relocation is always zero. */
9213 bfd_put_32 (output_bfd, 0,
9214 globals->root.sgotplt->contents + offplt +
9215 globals->sgotplt_jump_table_size + 4);
9216 }
9217 if (tls_type & GOT_TLS_GD)
9218 {
9219 if (need_relocs)
9220 {
9221 outrel.r_addend = 0;
9222 outrel.r_offset = (sgot->output_section->vma
9223 + sgot->output_offset
9224 + cur_off);
9225 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9226
9227 if (globals->use_rel)
9228 bfd_put_32 (output_bfd, outrel.r_addend,
9229 sgot->contents + cur_off);
9230
9231 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9232
9233 if (indx == 0)
9234 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9235 sgot->contents + cur_off + 4);
9236 else
9237 {
9238 outrel.r_addend = 0;
9239 outrel.r_info = ELF32_R_INFO (indx,
9240 R_ARM_TLS_DTPOFF32);
9241 outrel.r_offset += 4;
9242
9243 if (globals->use_rel)
9244 bfd_put_32 (output_bfd, outrel.r_addend,
9245 sgot->contents + cur_off + 4);
9246
9247 elf32_arm_add_dynreloc (output_bfd, info,
9248 srelgot, &outrel);
9249 }
9250 }
9251 else
9252 {
9253 /* If we are not emitting relocations for a
9254 general dynamic reference, then we must be in a
9255 static link or an executable link with the
9256 symbol binding locally. Mark it as belonging
9257 to module 1, the executable. */
9258 bfd_put_32 (output_bfd, 1,
9259 sgot->contents + cur_off);
9260 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9261 sgot->contents + cur_off + 4);
9262 }
9263
9264 cur_off += 8;
9265 }
9266
9267 if (tls_type & GOT_TLS_IE)
9268 {
9269 if (need_relocs)
9270 {
9271 if (indx == 0)
9272 outrel.r_addend = value - dtpoff_base (info);
9273 else
9274 outrel.r_addend = 0;
9275 outrel.r_offset = (sgot->output_section->vma
9276 + sgot->output_offset
9277 + cur_off);
9278 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9279
9280 if (globals->use_rel)
9281 bfd_put_32 (output_bfd, outrel.r_addend,
9282 sgot->contents + cur_off);
9283
9284 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9285 }
9286 else
9287 bfd_put_32 (output_bfd, tpoff (info, value),
9288 sgot->contents + cur_off);
9289 cur_off += 4;
9290 }
9291
9292 if (h != NULL)
9293 h->got.offset |= 1;
9294 else
9295 local_got_offsets[r_symndx] |= 1;
9296 }
9297
9298 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9299 off += 8;
9300 else if (tls_type & GOT_TLS_GDESC)
9301 off = offplt;
9302
9303 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9304 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9305 {
9306 bfd_signed_vma offset;
9307 enum elf32_arm_stub_type stub_type
9308 = arm_type_of_stub (info, input_section, rel,
9309 st_type, &branch_type,
9310 (struct elf32_arm_link_hash_entry *)h,
9311 globals->tls_trampoline, globals->root.splt,
9312 input_bfd, sym_name);
9313
9314 if (stub_type != arm_stub_none)
9315 {
9316 struct elf32_arm_stub_hash_entry *stub_entry
9317 = elf32_arm_get_stub_entry
9318 (input_section, globals->root.splt, 0, rel,
9319 globals, stub_type);
9320 offset = (stub_entry->stub_offset
9321 + stub_entry->stub_sec->output_offset
9322 + stub_entry->stub_sec->output_section->vma);
9323 }
9324 else
9325 offset = (globals->root.splt->output_section->vma
9326 + globals->root.splt->output_offset
9327 + globals->tls_trampoline);
9328
9329 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9330 {
9331 unsigned long inst;
9332
9333 offset -= (input_section->output_section->vma +
9334 input_section->output_offset + rel->r_offset + 8);
9335
9336 inst = offset >> 2;
9337 inst &= 0x00ffffff;
9338 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9339 }
9340 else
9341 {
9342 /* Thumb blx encodes the offset in a complicated
9343 fashion. */
9344 unsigned upper_insn, lower_insn;
9345 unsigned neg;
9346
9347 offset -= (input_section->output_section->vma +
9348 input_section->output_offset
9349 + rel->r_offset + 4);
9350
9351 /* Round up the offset to a word boundary */
9352 offset = (offset + 2) & ~2;
9353 neg = offset < 0;
9354 upper_insn = (0xf000
9355 | ((offset >> 12) & 0x3ff)
9356 | (neg << 10));
9357 lower_insn = (0xc000
9358 | (((!((offset >> 23) & 1)) ^ neg) << 13)
9359 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9360 | ((offset >> 1) & 0x7ff));
9361 bfd_put_16 (input_bfd, upper_insn, hit_data);
9362 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9363 return bfd_reloc_ok;
9364 }
9365 }
9366 /* These relocations need special care: besides the fact that
9367 they point somewhere in .gotplt, the addend must be
9368 adjusted according to the type of instruction that
9369 refers to it. */
9370 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9371 {
9372 unsigned long data, insn;
9373 unsigned thumb;
9374
9375 data = bfd_get_32 (input_bfd, hit_data);
9376 thumb = data & 1;
9377 data &= ~1u;
9378
9379 if (thumb)
9380 {
9381 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9382 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9383 insn = (insn << 16)
9384 | bfd_get_16 (input_bfd,
9385 contents + rel->r_offset - data + 2);
9386 if ((insn & 0xf800c000) == 0xf000c000)
9387 /* bl/blx */
9388 value = -6;
9389 else if ((insn & 0xffffff00) == 0x4400)
9390 /* add */
9391 value = -5;
9392 else
9393 {
9394 (*_bfd_error_handler)
9395 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9396 input_bfd, input_section,
9397 (unsigned long)rel->r_offset, insn);
9398 return bfd_reloc_notsupported;
9399 }
9400 }
9401 else
9402 {
9403 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9404
9405 switch (insn >> 24)
9406 {
9407 case 0xeb: /* bl */
9408 case 0xfa: /* blx */
9409 value = -4;
9410 break;
9411
9412 case 0xe0: /* add */
9413 value = -8;
9414 break;
9415
9416 default:
9417 (*_bfd_error_handler)
9418 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9419 input_bfd, input_section,
9420 (unsigned long)rel->r_offset, insn);
9421 return bfd_reloc_notsupported;
9422 }
9423 }
9424
9425 value += ((globals->root.sgotplt->output_section->vma
9426 + globals->root.sgotplt->output_offset + off)
9427 - (input_section->output_section->vma
9428 + input_section->output_offset
9429 + rel->r_offset)
9430 + globals->sgotplt_jump_table_size);
9431 }
9432 else
9433 value = ((globals->root.sgot->output_section->vma
9434 + globals->root.sgot->output_offset + off)
9435 - (input_section->output_section->vma
9436 + input_section->output_offset + rel->r_offset));
9437
9438 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9439 contents, rel->r_offset, value,
9440 rel->r_addend);
9441 }
9442
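/* Editorial sketch (guarded out): the Thumb branch rewriting above packs
   the offset into the Thumb-2 BL/BLX S:J1:J2:imm10:imm11 fields.  This
   hypothetical helper performs the inverse, purely to document the bit
   shuffle (I1 = NOT (J1 EOR S), I2 = NOT (J2 EOR S)).  */
#if 0
static bfd_signed_vma
decode_thumb2_bl_offset (unsigned int upper_insn, unsigned int lower_insn)
{
  unsigned int s  = (upper_insn >> 10) & 1;
  unsigned int j1 = (lower_insn >> 13) & 1;
  unsigned int j2 = (lower_insn >> 11) & 1;
  unsigned int i1 = !(j1 ^ s);
  unsigned int i2 = !(j2 ^ s);
  bfd_signed_vma imm = ((bfd_signed_vma) s << 24)
		       | ((bfd_signed_vma) i1 << 23)
		       | ((bfd_signed_vma) i2 << 22)
		       | ((bfd_signed_vma) (upper_insn & 0x3ff) << 12)
		       | ((bfd_signed_vma) (lower_insn & 0x7ff) << 1);

  /* Sign-extend from bit 24.  */
  return (imm ^ ((bfd_signed_vma) 1 << 24)) - ((bfd_signed_vma) 1 << 24);
}
#endif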
9443 case R_ARM_TLS_LE32:
9444 if (info->shared)
9445 {
9446 (*_bfd_error_handler)
9447 (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
9448 input_bfd, input_section,
9449 (long) rel->r_offset, howto->name);
9450 return bfd_reloc_notsupported;
9451 }
9452 else
9453 value = tpoff (info, value);
9454
9455 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9456 contents, rel->r_offset, value,
9457 rel->r_addend);
9458
9459 case R_ARM_V4BX:
9460 if (globals->fix_v4bx)
9461 {
9462 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9463
9464 /* Ensure that we have a BX instruction. */
9465 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9466
9467 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9468 {
9469 /* Branch to veneer. */
9470 bfd_vma glue_addr;
9471 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9472 glue_addr -= input_section->output_section->vma
9473 + input_section->output_offset
9474 + rel->r_offset + 8;
9475 insn = (insn & 0xf0000000) | 0x0a000000
9476 | ((glue_addr >> 2) & 0x00ffffff);
9477 }
9478 else
9479 {
9480 /* Preserve Rm (lowest four bits) and the condition code
9481 (highest four bits). Other bits encode MOV PC,Rm. */
9482 insn = (insn & 0xf000000f) | 0x01a0f000;
9483 }
9484
9485 bfd_put_32 (input_bfd, insn, hit_data);
9486 }
9487 return bfd_reloc_ok;
9488
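/* Worked example (guarded out) of the R_ARM_V4BX rewrite above when no
   veneer is needed: "BX r3" becomes "MOV pc, r3", preserving the
   condition field and Rm.  The function is illustrative and never
   called.  */
#if 0
static void
v4bx_rewrite_example (void)
{
  bfd_vma insn = 0xe12fff13;			/* BX r3, AL condition.  */

  insn = (insn & 0xf000000f) | 0x01a0f000;	/* Same rewrite as above.  */
  BFD_ASSERT (insn == 0xe1a0f003);		/* MOV pc, r3.  */
}
#endif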
9489 case R_ARM_MOVW_ABS_NC:
9490 case R_ARM_MOVT_ABS:
9491 case R_ARM_MOVW_PREL_NC:
9492 case R_ARM_MOVT_PREL:
9493 /* Until we properly support segment-base-relative addressing, we
9494 assume the segment base to be zero, as for the group relocations.
9495 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9496 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9497 case R_ARM_MOVW_BREL_NC:
9498 case R_ARM_MOVW_BREL:
9499 case R_ARM_MOVT_BREL:
9500 {
9501 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9502
9503 if (globals->use_rel)
9504 {
9505 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9506 signed_addend = (addend ^ 0x8000) - 0x8000;
9507 }
9508
9509 value += signed_addend;
9510
9511 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9512 value -= (input_section->output_section->vma
9513 + input_section->output_offset + rel->r_offset);
9514
9515 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9516 return bfd_reloc_overflow;
9517
9518 if (branch_type == ST_BRANCH_TO_THUMB)
9519 value |= 1;
9520
9521 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9522 || r_type == R_ARM_MOVT_BREL)
9523 value >>= 16;
9524
9525 insn &= 0xfff0f000;
9526 insn |= value & 0xfff;
9527 insn |= (value & 0xf000) << 4;
9528 bfd_put_32 (input_bfd, insn, hit_data);
9529 }
9530 return bfd_reloc_ok;
9531
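/* Editorial sketch (guarded out): the ARM MOVW/MOVT cases above keep the
   16-bit immediate split as imm4:imm12 (instruction bits 19:16 and 11:0).
   These hypothetical helpers show the REL addend extraction, including
   the (x ^ 0x8000) - 0x8000 sign-extension trick, and the re-insertion.  */
#if 0
static bfd_signed_vma
movw_movt_get_addend (bfd_vma insn)
{
  bfd_vma addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);

  return (addend ^ 0x8000) - 0x8000;	/* Sign-extend 16 to full width.  */
}

static bfd_vma
movw_movt_put_value (bfd_vma insn, bfd_vma value)
{
  insn &= 0xfff0f000;			/* Clear imm4 and imm12.  */
  insn |= value & 0xfff;		/* imm12.  */
  insn |= (value & 0xf000) << 4;	/* imm4.  */
  return insn;
}
#endif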
9532 case R_ARM_THM_MOVW_ABS_NC:
9533 case R_ARM_THM_MOVT_ABS:
9534 case R_ARM_THM_MOVW_PREL_NC:
9535 case R_ARM_THM_MOVT_PREL:
9536 /* Until we properly support segment-base-relative addressing, we
9537 assume the segment base to be zero, as for the above relocations.
9538 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9539 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9540 as R_ARM_THM_MOVT_ABS. */
9541 case R_ARM_THM_MOVW_BREL_NC:
9542 case R_ARM_THM_MOVW_BREL:
9543 case R_ARM_THM_MOVT_BREL:
9544 {
9545 bfd_vma insn;
9546
9547 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9548 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9549
9550 if (globals->use_rel)
9551 {
9552 addend = ((insn >> 4) & 0xf000)
9553 | ((insn >> 15) & 0x0800)
9554 | ((insn >> 4) & 0x0700)
9555 | (insn & 0x00ff);
9556 signed_addend = (addend ^ 0x8000) - 0x8000;
9557 }
9558
9559 value += signed_addend;
9560
9561 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9562 value -= (input_section->output_section->vma
9563 + input_section->output_offset + rel->r_offset);
9564
9565 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9566 return bfd_reloc_overflow;
9567
9568 if (branch_type == ST_BRANCH_TO_THUMB)
9569 value |= 1;
9570
9571 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9572 || r_type == R_ARM_THM_MOVT_BREL)
9573 value >>= 16;
9574
9575 insn &= 0xfbf08f00;
9576 insn |= (value & 0xf000) << 4;
9577 insn |= (value & 0x0800) << 15;
9578 insn |= (value & 0x0700) << 4;
9579 insn |= (value & 0x00ff);
9580
9581 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9582 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9583 }
9584 return bfd_reloc_ok;
9585
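/* Editorial sketch (guarded out): the Thumb-2 MOVW/MOVT cases above split
   the 16-bit immediate as imm4:i:imm3:imm8 across the instruction (held
   here with the first halfword in the upper 16 bits).  A hypothetical
   insertion helper mirroring the masks used above.  */
#if 0
static bfd_vma
thm_movw_movt_put_value (bfd_vma insn, bfd_vma value)
{
  insn &= 0xfbf08f00;			/* Clear imm4, i, imm3 and imm8.  */
  insn |= (value & 0xf000) << 4;	/* imm4.  */
  insn |= (value & 0x0800) << 15;	/* i.  */
  insn |= (value & 0x0700) << 4;	/* imm3.  */
  insn |= (value & 0x00ff);		/* imm8.  */
  return insn;
}
#endif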
9586 case R_ARM_ALU_PC_G0_NC:
9587 case R_ARM_ALU_PC_G1_NC:
9588 case R_ARM_ALU_PC_G0:
9589 case R_ARM_ALU_PC_G1:
9590 case R_ARM_ALU_PC_G2:
9591 case R_ARM_ALU_SB_G0_NC:
9592 case R_ARM_ALU_SB_G1_NC:
9593 case R_ARM_ALU_SB_G0:
9594 case R_ARM_ALU_SB_G1:
9595 case R_ARM_ALU_SB_G2:
9596 {
9597 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9598 bfd_vma pc = input_section->output_section->vma
9599 + input_section->output_offset + rel->r_offset;
9600 /* sb should be the origin of the *segment* containing the symbol.
9601 It is not clear how to obtain this OS-dependent value, so we
9602 make an arbitrary choice of zero. */
9603 bfd_vma sb = 0;
9604 bfd_vma residual;
9605 bfd_vma g_n;
9606 bfd_signed_vma signed_value;
9607 int group = 0;
9608
9609 /* Determine which group of bits to select. */
9610 switch (r_type)
9611 {
9612 case R_ARM_ALU_PC_G0_NC:
9613 case R_ARM_ALU_PC_G0:
9614 case R_ARM_ALU_SB_G0_NC:
9615 case R_ARM_ALU_SB_G0:
9616 group = 0;
9617 break;
9618
9619 case R_ARM_ALU_PC_G1_NC:
9620 case R_ARM_ALU_PC_G1:
9621 case R_ARM_ALU_SB_G1_NC:
9622 case R_ARM_ALU_SB_G1:
9623 group = 1;
9624 break;
9625
9626 case R_ARM_ALU_PC_G2:
9627 case R_ARM_ALU_SB_G2:
9628 group = 2;
9629 break;
9630
9631 default:
9632 abort ();
9633 }
9634
9635 /* If REL, extract the addend from the insn. If RELA, it will
9636 have already been fetched for us. */
9637 if (globals->use_rel)
9638 {
9639 int negative;
9640 bfd_vma constant = insn & 0xff;
9641 bfd_vma rotation = (insn & 0xf00) >> 8;
9642
9643 if (rotation == 0)
9644 signed_addend = constant;
9645 else
9646 {
9647 /* Compensate for the fact that in the instruction, the
9648 rotation is stored in multiples of 2 bits. */
9649 rotation *= 2;
9650
9651 /* Rotate "constant" right by "rotation" bits. */
9652 signed_addend = (constant >> rotation) |
9653 (constant << (8 * sizeof (bfd_vma) - rotation));
9654 }
9655
9656 /* Determine if the instruction is an ADD or a SUB.
9657 (For REL, this determines the sign of the addend.) */
9658 negative = identify_add_or_sub (insn);
9659 if (negative == 0)
9660 {
9661 (*_bfd_error_handler)
9662 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9663 input_bfd, input_section,
9664 (long) rel->r_offset, howto->name);
9665 return bfd_reloc_overflow;
9666 }
9667
9668 signed_addend *= negative;
9669 }
9670
9671 /* Compute the value (X) to go in the place. */
9672 if (r_type == R_ARM_ALU_PC_G0_NC
9673 || r_type == R_ARM_ALU_PC_G1_NC
9674 || r_type == R_ARM_ALU_PC_G0
9675 || r_type == R_ARM_ALU_PC_G1
9676 || r_type == R_ARM_ALU_PC_G2)
9677 /* PC relative. */
9678 signed_value = value - pc + signed_addend;
9679 else
9680 /* Section base relative. */
9681 signed_value = value - sb + signed_addend;
9682
9683 /* If the target symbol is a Thumb function, then set the
9684 Thumb bit in the address. */
9685 if (branch_type == ST_BRANCH_TO_THUMB)
9686 signed_value |= 1;
9687
9688 /* Calculate the value of the relevant G_n, in encoded
9689 constant-with-rotation format. */
9690 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9691 &residual);
9692
9693 /* Check for overflow if required. */
9694 if ((r_type == R_ARM_ALU_PC_G0
9695 || r_type == R_ARM_ALU_PC_G1
9696 || r_type == R_ARM_ALU_PC_G2
9697 || r_type == R_ARM_ALU_SB_G0
9698 || r_type == R_ARM_ALU_SB_G1
9699 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9700 {
9701 (*_bfd_error_handler)
9702 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9703 input_bfd, input_section,
9704 (long) rel->r_offset, abs (signed_value), howto->name);
9705 return bfd_reloc_overflow;
9706 }
9707
9708 /* Mask out the value and the ADD/SUB part of the opcode; take care
9709 not to destroy the S bit. */
9710 insn &= 0xff1ff000;
9711
9712 /* Set the opcode according to whether the value to go in the
9713 place is negative. */
9714 if (signed_value < 0)
9715 insn |= 1 << 22;
9716 else
9717 insn |= 1 << 23;
9718
9719 /* Encode the offset. */
9720 insn |= g_n;
9721
9722 bfd_put_32 (input_bfd, insn, hit_data);
9723 }
9724 return bfd_reloc_ok;
9725
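/* Editorial sketch (guarded out): the ALU group relocations above operate
   on ARM's "modified immediate" operand, an 8-bit constant rotated right
   by twice the 4-bit rotation field.  A self-contained decoder for that
   operand form; the helper name is ours, the linker extracts the addend
   inline above.  */
#if 0
static unsigned long
decode_arm_modified_immediate (unsigned long insn)
{
  unsigned long constant = insn & 0xff;
  unsigned int rotation = ((insn >> 8) & 0xf) * 2;

  if (rotation == 0)
    return constant;
  return ((constant >> rotation) | (constant << (32 - rotation)))
	 & 0xffffffff;
}
#endif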
9726 case R_ARM_LDR_PC_G0:
9727 case R_ARM_LDR_PC_G1:
9728 case R_ARM_LDR_PC_G2:
9729 case R_ARM_LDR_SB_G0:
9730 case R_ARM_LDR_SB_G1:
9731 case R_ARM_LDR_SB_G2:
9732 {
9733 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9734 bfd_vma pc = input_section->output_section->vma
9735 + input_section->output_offset + rel->r_offset;
9736 bfd_vma sb = 0; /* See note above. */
9737 bfd_vma residual;
9738 bfd_signed_vma signed_value;
9739 int group = 0;
9740
9741 /* Determine which groups of bits to calculate. */
9742 switch (r_type)
9743 {
9744 case R_ARM_LDR_PC_G0:
9745 case R_ARM_LDR_SB_G0:
9746 group = 0;
9747 break;
9748
9749 case R_ARM_LDR_PC_G1:
9750 case R_ARM_LDR_SB_G1:
9751 group = 1;
9752 break;
9753
9754 case R_ARM_LDR_PC_G2:
9755 case R_ARM_LDR_SB_G2:
9756 group = 2;
9757 break;
9758
9759 default:
9760 abort ();
9761 }
9762
9763 /* If REL, extract the addend from the insn. If RELA, it will
9764 have already been fetched for us. */
9765 if (globals->use_rel)
9766 {
9767 int negative = (insn & (1 << 23)) ? 1 : -1;
9768 signed_addend = negative * (insn & 0xfff);
9769 }
9770
9771 /* Compute the value (X) to go in the place. */
9772 if (r_type == R_ARM_LDR_PC_G0
9773 || r_type == R_ARM_LDR_PC_G1
9774 || r_type == R_ARM_LDR_PC_G2)
9775 /* PC relative. */
9776 signed_value = value - pc + signed_addend;
9777 else
9778 /* Section base relative. */
9779 signed_value = value - sb + signed_addend;
9780
9781 /* Calculate the value of the relevant G_{n-1} to obtain
9782 the residual at that stage. */
9783 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
9784
9785 /* Check for overflow. */
9786 if (residual >= 0x1000)
9787 {
9788 (*_bfd_error_handler)
9789 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9790 input_bfd, input_section,
9791 (long) rel->r_offset, abs (signed_value), howto->name);
9792 return bfd_reloc_overflow;
9793 }
9794
9795 /* Mask out the value and U bit. */
9796 insn &= 0xff7ff000;
9797
9798 /* Set the U bit if the value to go in the place is non-negative. */
9799 if (signed_value >= 0)
9800 insn |= 1 << 23;
9801
9802 /* Encode the offset. */
9803 insn |= residual;
9804
9805 bfd_put_32 (input_bfd, insn, hit_data);
9806 }
9807 return bfd_reloc_ok;
9808
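/* Editorial sketch (guarded out): a simplified version of the group
   splitting rule behind calculate_group_reloc_mask (defined earlier in
   this file), following the AAELF group relocation scheme: each group
   peels off the most significant 8-bit chunk, aligned to an even bit
   position, and the leftover after removing groups 0..n-1 is what must
   fit in the instruction's offset field.  Unchecked and illustrative
   only.  */
#if 0
static bfd_vma
group_reloc_residual_sketch (bfd_vma value, int ngroups)
{
  bfd_vma residual = value;
  int n;

  for (n = 0; n < ngroups && residual != 0; n++)
    {
      int msb, shift;

      /* Find the most significant set bit, rounded down to an even bit
	 position, then remove the byte-wide chunk ending there.  */
      for (msb = 30; msb >= 0; msb -= 2)
	if (residual & ((bfd_vma) 3 << msb))
	  break;
      shift = msb - 6 > 0 ? msb - 6 : 0;
      residual &= ~((bfd_vma) 0xff << shift);
    }

  return residual;
}
#endif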
9809 case R_ARM_LDRS_PC_G0:
9810 case R_ARM_LDRS_PC_G1:
9811 case R_ARM_LDRS_PC_G2:
9812 case R_ARM_LDRS_SB_G0:
9813 case R_ARM_LDRS_SB_G1:
9814 case R_ARM_LDRS_SB_G2:
9815 {
9816 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9817 bfd_vma pc = input_section->output_section->vma
9818 + input_section->output_offset + rel->r_offset;
9819 bfd_vma sb = 0; /* See note above. */
9820 bfd_vma residual;
9821 bfd_signed_vma signed_value;
9822 int group = 0;
9823
9824 /* Determine which groups of bits to calculate. */
9825 switch (r_type)
9826 {
9827 case R_ARM_LDRS_PC_G0:
9828 case R_ARM_LDRS_SB_G0:
9829 group = 0;
9830 break;
9831
9832 case R_ARM_LDRS_PC_G1:
9833 case R_ARM_LDRS_SB_G1:
9834 group = 1;
9835 break;
9836
9837 case R_ARM_LDRS_PC_G2:
9838 case R_ARM_LDRS_SB_G2:
9839 group = 2;
9840 break;
9841
9842 default:
9843 abort ();
9844 }
9845
9846 /* If REL, extract the addend from the insn. If RELA, it will
9847 have already been fetched for us. */
9848 if (globals->use_rel)
9849 {
9850 int negative = (insn & (1 << 23)) ? 1 : -1;
9851 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
9852 }
9853
9854 /* Compute the value (X) to go in the place. */
9855 if (r_type == R_ARM_LDRS_PC_G0
9856 || r_type == R_ARM_LDRS_PC_G1
9857 || r_type == R_ARM_LDRS_PC_G2)
9858 /* PC relative. */
9859 signed_value = value - pc + signed_addend;
9860 else
9861 /* Section base relative. */
9862 signed_value = value - sb + signed_addend;
9863
9864 /* Calculate the value of the relevant G_{n-1} to obtain
9865 the residual at that stage. */
9866 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
9867
9868 /* Check for overflow. */
9869 if (residual >= 0x100)
9870 {
9871 (*_bfd_error_handler)
9872 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9873 input_bfd, input_section,
9874 (long) rel->r_offset, abs (signed_value), howto->name);
9875 return bfd_reloc_overflow;
9876 }
9877
9878 /* Mask out the value and U bit. */
9879 insn &= 0xff7ff0f0;
9880
9881 /* Set the U bit if the value to go in the place is non-negative. */
9882 if (signed_value >= 0)
9883 insn |= 1 << 23;
9884
9885 /* Encode the offset. */
9886 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
9887
9888 bfd_put_32 (input_bfd, insn, hit_data);
9889 }
9890 return bfd_reloc_ok;
9891
9892 case R_ARM_LDC_PC_G0:
9893 case R_ARM_LDC_PC_G1:
9894 case R_ARM_LDC_PC_G2:
9895 case R_ARM_LDC_SB_G0:
9896 case R_ARM_LDC_SB_G1:
9897 case R_ARM_LDC_SB_G2:
9898 {
9899 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9900 bfd_vma pc = input_section->output_section->vma
9901 + input_section->output_offset + rel->r_offset;
9902 bfd_vma sb = 0; /* See note above. */
9903 bfd_vma residual;
9904 bfd_signed_vma signed_value;
9905 int group = 0;
9906
9907 /* Determine which groups of bits to calculate. */
9908 switch (r_type)
9909 {
9910 case R_ARM_LDC_PC_G0:
9911 case R_ARM_LDC_SB_G0:
9912 group = 0;
9913 break;
9914
9915 case R_ARM_LDC_PC_G1:
9916 case R_ARM_LDC_SB_G1:
9917 group = 1;
9918 break;
9919
9920 case R_ARM_LDC_PC_G2:
9921 case R_ARM_LDC_SB_G2:
9922 group = 2;
9923 break;
9924
9925 default:
9926 abort ();
9927 }
9928
9929 /* If REL, extract the addend from the insn. If RELA, it will
9930 have already been fetched for us. */
9931 if (globals->use_rel)
9932 {
9933 int negative = (insn & (1 << 23)) ? 1 : -1;
9934 signed_addend = negative * ((insn & 0xff) << 2);
9935 }
9936
9937 /* Compute the value (X) to go in the place. */
9938 if (r_type == R_ARM_LDC_PC_G0
9939 || r_type == R_ARM_LDC_PC_G1
9940 || r_type == R_ARM_LDC_PC_G2)
9941 /* PC relative. */
9942 signed_value = value - pc + signed_addend;
9943 else
9944 /* Section base relative. */
9945 signed_value = value - sb + signed_addend;
9946
9947 /* Calculate the value of the relevant G_{n-1} to obtain
9948 the residual at that stage. */
9949 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
9950
9951 /* Check for overflow. (The absolute value to go in the place must be
9952 divisible by four and, after having been divided by four, must
9953 fit in eight bits.) */
9954 if ((residual & 0x3) != 0 || residual >= 0x400)
9955 {
9956 (*_bfd_error_handler)
9957 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9958 input_bfd, input_section,
9959 (long) rel->r_offset, abs (signed_value), howto->name);
9960 return bfd_reloc_overflow;
9961 }
9962
9963 /* Mask out the value and U bit. */
9964 insn &= 0xff7fff00;
9965
9966 /* Set the U bit if the value to go in the place is non-negative. */
9967 if (signed_value >= 0)
9968 insn |= 1 << 23;
9969
9970 /* Encode the offset. */
9971 insn |= residual >> 2;
9972
9973 bfd_put_32 (input_bfd, insn, hit_data);
9974 }
9975 return bfd_reloc_ok;
9976
9977 default:
9978 return bfd_reloc_notsupported;
9979 }
9980 }
9981
9982 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
9983 static void
9984 arm_add_to_rel (bfd * abfd,
9985 bfd_byte * address,
9986 reloc_howto_type * howto,
9987 bfd_signed_vma increment)
9988 {
9989 bfd_signed_vma addend;
9990
9991 if (howto->type == R_ARM_THM_CALL
9992 || howto->type == R_ARM_THM_JUMP24)
9993 {
9994 int upper_insn, lower_insn;
9995 int upper, lower;
9996
9997 upper_insn = bfd_get_16 (abfd, address);
9998 lower_insn = bfd_get_16 (abfd, address + 2);
9999 upper = upper_insn & 0x7ff;
10000 lower = lower_insn & 0x7ff;
10001
10002 addend = (upper << 12) | (lower << 1);
10003 addend += increment;
10004 addend >>= 1;
10005
10006 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10007 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10008
10009 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10010 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10011 }
10012 else
10013 {
10014 bfd_vma contents;
10015
10016 contents = bfd_get_32 (abfd, address);
10017
10018 /* Get the (signed) value from the instruction. */
10019 addend = contents & howto->src_mask;
10020 if (addend & ((howto->src_mask + 1) >> 1))
10021 {
10022 bfd_signed_vma mask;
10023
10024 mask = -1;
10025 mask &= ~ howto->src_mask;
10026 addend |= mask;
10027 }
10028
10029 /* Add in the increment, (which is a byte value). */
10030 switch (howto->type)
10031 {
10032 default:
10033 addend += increment;
10034 break;
10035
10036 case R_ARM_PC24:
10037 case R_ARM_PLT32:
10038 case R_ARM_CALL:
10039 case R_ARM_JUMP24:
10040 addend <<= howto->size;
10041 addend += increment;
10042
10043 /* Should we check for overflow here ? */
10044
10045 /* Drop any undesired bits. */
10046 addend >>= howto->rightshift;
10047 break;
10048 }
10049
10050 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10051
10052 bfd_put_32 (abfd, contents, address);
10053 }
10054 }
10055
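/* Editorial sketch (guarded out): arm_add_to_rel above extracts a signed
   addend from an instruction field by testing the field's top bit and
   OR-ing in the bits outside SRC_MASK.  The same idiom as a stand-alone
   helper (hypothetical name); SRC_MASK must be a contiguous mask starting
   at bit 0, as the howto src_mask values used here are.  */
#if 0
static bfd_signed_vma
extract_signed_field (bfd_vma contents, bfd_vma src_mask)
{
  bfd_signed_vma addend = contents & src_mask;

  if (addend & ((src_mask + 1) >> 1))		/* Field's sign bit set?  */
    addend |= ~(bfd_signed_vma) src_mask;	/* Sign-extend.  */
  return addend;
}
#endif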
10056 #define IS_ARM_TLS_RELOC(R_TYPE) \
10057 ((R_TYPE) == R_ARM_TLS_GD32 \
10058 || (R_TYPE) == R_ARM_TLS_LDO32 \
10059 || (R_TYPE) == R_ARM_TLS_LDM32 \
10060 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10061 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10062 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10063 || (R_TYPE) == R_ARM_TLS_LE32 \
10064 || (R_TYPE) == R_ARM_TLS_IE32 \
10065 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10066
10067 /* Specific set of relocations for the gnu tls dialect. */
10068 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10069 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10070 || (R_TYPE) == R_ARM_TLS_CALL \
10071 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10072 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10073 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10074
10075 /* Relocate an ARM ELF section. */
10076
10077 static bfd_boolean
10078 elf32_arm_relocate_section (bfd * output_bfd,
10079 struct bfd_link_info * info,
10080 bfd * input_bfd,
10081 asection * input_section,
10082 bfd_byte * contents,
10083 Elf_Internal_Rela * relocs,
10084 Elf_Internal_Sym * local_syms,
10085 asection ** local_sections)
10086 {
10087 Elf_Internal_Shdr *symtab_hdr;
10088 struct elf_link_hash_entry **sym_hashes;
10089 Elf_Internal_Rela *rel;
10090 Elf_Internal_Rela *relend;
10091 const char *name;
10092 struct elf32_arm_link_hash_table * globals;
10093
10094 globals = elf32_arm_hash_table (info);
10095 if (globals == NULL)
10096 return FALSE;
10097
10098 symtab_hdr = & elf_symtab_hdr (input_bfd);
10099 sym_hashes = elf_sym_hashes (input_bfd);
10100
10101 rel = relocs;
10102 relend = relocs + input_section->reloc_count;
10103 for (; rel < relend; rel++)
10104 {
10105 int r_type;
10106 reloc_howto_type * howto;
10107 unsigned long r_symndx;
10108 Elf_Internal_Sym * sym;
10109 asection * sec;
10110 struct elf_link_hash_entry * h;
10111 bfd_vma relocation;
10112 bfd_reloc_status_type r;
10113 arelent bfd_reloc;
10114 char sym_type;
10115 bfd_boolean unresolved_reloc = FALSE;
10116 char *error_message = NULL;
10117
10118 r_symndx = ELF32_R_SYM (rel->r_info);
10119 r_type = ELF32_R_TYPE (rel->r_info);
10120 r_type = arm_real_reloc_type (globals, r_type);
10121
10122 if ( r_type == R_ARM_GNU_VTENTRY
10123 || r_type == R_ARM_GNU_VTINHERIT)
10124 continue;
10125
10126 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10127 howto = bfd_reloc.howto;
10128
10129 h = NULL;
10130 sym = NULL;
10131 sec = NULL;
10132
10133 if (r_symndx < symtab_hdr->sh_info)
10134 {
10135 sym = local_syms + r_symndx;
10136 sym_type = ELF32_ST_TYPE (sym->st_info);
10137 sec = local_sections[r_symndx];
10138
10139 /* An object file might have a reference to a local
10140 undefined symbol. This is a daft object file, but we
10141 should at least do something about it. V4BX & NONE
10142 relocations do not use the symbol and are explicitly
10143 allowed to use the undefined symbol, so allow those.
10144 Likewise for relocations against STN_UNDEF. */
10145 if (r_type != R_ARM_V4BX
10146 && r_type != R_ARM_NONE
10147 && r_symndx != STN_UNDEF
10148 && bfd_is_und_section (sec)
10149 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10150 {
10151 if (!info->callbacks->undefined_symbol
10152 (info, bfd_elf_string_from_elf_section
10153 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10154 input_bfd, input_section,
10155 rel->r_offset, TRUE))
10156 return FALSE;
10157 }
10158
10159 if (globals->use_rel)
10160 {
10161 relocation = (sec->output_section->vma
10162 + sec->output_offset
10163 + sym->st_value);
10164 if (!info->relocatable
10165 && (sec->flags & SEC_MERGE)
10166 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10167 {
10168 asection *msec;
10169 bfd_vma addend, value;
10170
10171 switch (r_type)
10172 {
10173 case R_ARM_MOVW_ABS_NC:
10174 case R_ARM_MOVT_ABS:
10175 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10176 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10177 addend = (addend ^ 0x8000) - 0x8000;
10178 break;
10179
10180 case R_ARM_THM_MOVW_ABS_NC:
10181 case R_ARM_THM_MOVT_ABS:
10182 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10183 << 16;
10184 value |= bfd_get_16 (input_bfd,
10185 contents + rel->r_offset + 2);
10186 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10187 | ((value & 0x04000000) >> 15);
10188 addend = (addend ^ 0x8000) - 0x8000;
10189 break;
10190
10191 default:
10192 if (howto->rightshift
10193 || (howto->src_mask & (howto->src_mask + 1)))
10194 {
10195 (*_bfd_error_handler)
10196 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10197 input_bfd, input_section,
10198 (long) rel->r_offset, howto->name);
10199 return FALSE;
10200 }
10201
10202 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10203
10204 /* Get the (signed) value from the instruction. */
10205 addend = value & howto->src_mask;
10206 if (addend & ((howto->src_mask + 1) >> 1))
10207 {
10208 bfd_signed_vma mask;
10209
10210 mask = -1;
10211 mask &= ~ howto->src_mask;
10212 addend |= mask;
10213 }
10214 break;
10215 }
10216
10217 msec = sec;
10218 addend =
10219 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10220 - relocation;
10221 addend += msec->output_section->vma + msec->output_offset;
10222
10223 /* Cases here must match those in the preceding
10224 switch statement. */
10225 switch (r_type)
10226 {
10227 case R_ARM_MOVW_ABS_NC:
10228 case R_ARM_MOVT_ABS:
10229 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10230 | (addend & 0xfff);
10231 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10232 break;
10233
10234 case R_ARM_THM_MOVW_ABS_NC:
10235 case R_ARM_THM_MOVT_ABS:
10236 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10237 | (addend & 0xff) | ((addend & 0x0800) << 15);
10238 bfd_put_16 (input_bfd, value >> 16,
10239 contents + rel->r_offset);
10240 bfd_put_16 (input_bfd, value,
10241 contents + rel->r_offset + 2);
10242 break;
10243
10244 default:
10245 value = (value & ~ howto->dst_mask)
10246 | (addend & howto->dst_mask);
10247 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10248 break;
10249 }
10250 }
10251 }
10252 else
10253 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10254 }
10255 else
10256 {
10257 bfd_boolean warned;
10258
10259 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10260 r_symndx, symtab_hdr, sym_hashes,
10261 h, sec, relocation,
10262 unresolved_reloc, warned);
10263
10264 sym_type = h->type;
10265 }
10266
10267 if (sec != NULL && elf_discarded_section (sec))
10268 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10269 rel, relend, howto, contents);
10270
10271 if (info->relocatable)
10272 {
10273 /* This is a relocatable link. We don't have to change
10274 anything, unless the reloc is against a section symbol,
10275 in which case we have to adjust according to where the
10276 section symbol winds up in the output section. */
10277 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10278 {
10279 if (globals->use_rel)
10280 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10281 howto, (bfd_signed_vma) sec->output_offset);
10282 else
10283 rel->r_addend += sec->output_offset;
10284 }
10285 continue;
10286 }
10287
10288 if (h != NULL)
10289 name = h->root.root.string;
10290 else
10291 {
10292 name = (bfd_elf_string_from_elf_section
10293 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10294 if (name == NULL || *name == '\0')
10295 name = bfd_section_name (input_bfd, sec);
10296 }
10297
10298 if (r_symndx != STN_UNDEF
10299 && r_type != R_ARM_NONE
10300 && (h == NULL
10301 || h->root.type == bfd_link_hash_defined
10302 || h->root.type == bfd_link_hash_defweak)
10303 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10304 {
10305 (*_bfd_error_handler)
10306 ((sym_type == STT_TLS
10307 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10308 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10309 input_bfd,
10310 input_section,
10311 (long) rel->r_offset,
10312 howto->name,
10313 name);
10314 }
10315
10316 /* We call elf32_arm_final_link_relocate unless we're completely
10317 done, i.e., the relaxation produced the final output we want,
10318 and we won't let anybody mess with it. Also, we have to do
10319 addend adjustments in case of an R_ARM_TLS_GOTDESC relocation,
10320 both in relaxed and non-relaxed cases. */
10321 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10322 || (IS_ARM_TLS_GNU_RELOC (r_type)
10323 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10324 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10325 & GOT_TLS_GDESC)))
10326 {
10327 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10328 contents, rel, h == NULL);
10329 /* This may have been marked unresolved because it came from
10330 a shared library. But we've just dealt with that. */
10331 unresolved_reloc = 0;
10332 }
10333 else
10334 r = bfd_reloc_continue;
10335
10336 if (r == bfd_reloc_continue)
10337 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10338 input_section, contents, rel,
10339 relocation, info, sec, name, sym_type,
10340 (h ? h->target_internal
10341 : ARM_SYM_BRANCH_TYPE (sym)), h,
10342 &unresolved_reloc, &error_message);
10343
10344 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10345 because such sections are not SEC_ALLOC and thus ld.so will
10346 not process them. */
10347 if (unresolved_reloc
10348 && !((input_section->flags & SEC_DEBUGGING) != 0
10349 && h->def_dynamic))
10350 {
10351 (*_bfd_error_handler)
10352 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10353 input_bfd,
10354 input_section,
10355 (long) rel->r_offset,
10356 howto->name,
10357 h->root.root.string);
10358 return FALSE;
10359 }
10360
10361 if (r != bfd_reloc_ok)
10362 {
10363 switch (r)
10364 {
10365 case bfd_reloc_overflow:
10366 /* If the overflowing reloc was to an undefined symbol,
10367 we have already printed one error message and there
10368 is no point complaining again. */
10369 if ((! h ||
10370 h->root.type != bfd_link_hash_undefined)
10371 && (!((*info->callbacks->reloc_overflow)
10372 (info, (h ? &h->root : NULL), name, howto->name,
10373 (bfd_vma) 0, input_bfd, input_section,
10374 rel->r_offset))))
10375 return FALSE;
10376 break;
10377
10378 case bfd_reloc_undefined:
10379 if (!((*info->callbacks->undefined_symbol)
10380 (info, name, input_bfd, input_section,
10381 rel->r_offset, TRUE)))
10382 return FALSE;
10383 break;
10384
10385 case bfd_reloc_outofrange:
10386 error_message = _("out of range");
10387 goto common_error;
10388
10389 case bfd_reloc_notsupported:
10390 error_message = _("unsupported relocation");
10391 goto common_error;
10392
10393 case bfd_reloc_dangerous:
10394 /* error_message should already be set. */
10395 goto common_error;
10396
10397 default:
10398 error_message = _("unknown error");
10399 /* Fall through. */
10400
10401 common_error:
10402 BFD_ASSERT (error_message != NULL);
10403 if (!((*info->callbacks->reloc_dangerous)
10404 (info, error_message, input_bfd, input_section,
10405 rel->r_offset)))
10406 return FALSE;
10407 break;
10408 }
10409 }
10410 }
10411
10412 return TRUE;
10413 }
10414
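/* Editorial sketch (guarded out): elf32_arm_relocate_section above copes
   with both REL and RELA objects.  For RELA the addend is rel->r_addend;
   for REL it lives in the instruction bits covered by the howto's
   src_mask.  A hypothetical helper summarizing that split (the real code
   also handles special encodings such as MOVW/MOVT, which this
   ignores).  */
#if 0
static bfd_signed_vma
get_reloc_addend_sketch (bfd *ibfd, bfd_boolean use_rel,
			 reloc_howto_type *howto,
			 bfd_byte *contents, Elf_Internal_Rela *rel)
{
  bfd_vma field;

  if (!use_rel)
    return rel->r_addend;

  field = bfd_get_32 (ibfd, contents + rel->r_offset) & howto->src_mask;
  if (field & ((howto->src_mask + 1) >> 1))
    field |= ~howto->src_mask;		/* Sign-extend the field.  */
  return (bfd_signed_vma) field;
}
#endif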
10415 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10416 adds the edit to the start of the list. (The list must be built in order of
10417 ascending TINDEX: the function's callers are primarily responsible for
10418 maintaining that condition). */
10419
10420 static void
10421 add_unwind_table_edit (arm_unwind_table_edit **head,
10422 arm_unwind_table_edit **tail,
10423 arm_unwind_edit_type type,
10424 asection *linked_section,
10425 unsigned int tindex)
10426 {
10427 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10428 xmalloc (sizeof (arm_unwind_table_edit));
10429
10430 new_edit->type = type;
10431 new_edit->linked_section = linked_section;
10432 new_edit->index = tindex;
10433
10434 if (tindex > 0)
10435 {
10436 new_edit->next = NULL;
10437
10438 if (*tail)
10439 (*tail)->next = new_edit;
10440
10441 (*tail) = new_edit;
10442
10443 if (!*head)
10444 (*head) = new_edit;
10445 }
10446 else
10447 {
10448 new_edit->next = *head;
10449
10450 if (!*tail)
10451 *tail = new_edit;
10452
10453 *head = new_edit;
10454 }
10455 }
10456
10457 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10458
10459 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10460 static void
10461 adjust_exidx_size(asection *exidx_sec, int adjust)
10462 {
10463 asection *out_sec;
10464
10465 if (!exidx_sec->rawsize)
10466 exidx_sec->rawsize = exidx_sec->size;
10467
10468 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10469 out_sec = exidx_sec->output_section;
10470 /* Adjust size of output section. */
10471 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10472 }
10473
10474 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10475 static void
10476 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10477 {
10478 struct _arm_elf_section_data *exidx_arm_data;
10479
10480 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10481 add_unwind_table_edit (
10482 &exidx_arm_data->u.exidx.unwind_edit_list,
10483 &exidx_arm_data->u.exidx.unwind_edit_tail,
10484 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10485
10486 adjust_exidx_size(exidx_sec, 8);
10487 }
10488
10489 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10490 made to those tables, such that:
10491
10492 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10493 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10494 codes which have been inlined into the index).
10495
10496 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10497
10498 The edits are applied when the tables are written
10499 (in elf32_arm_write_section).
10500 */
10501
10502 bfd_boolean
10503 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10504 unsigned int num_text_sections,
10505 struct bfd_link_info *info,
10506 bfd_boolean merge_exidx_entries)
10507 {
10508 bfd *inp;
10509 unsigned int last_second_word = 0, i;
10510 asection *last_exidx_sec = NULL;
10511 asection *last_text_sec = NULL;
10512 int last_unwind_type = -1;
10513
10514 /* Walk over all EXIDX sections, and create backlinks from the corresponding
10515 text sections. */
10516 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10517 {
10518 asection *sec;
10519
10520 for (sec = inp->sections; sec != NULL; sec = sec->next)
10521 {
10522 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10523 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10524
10525 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10526 continue;
10527
10528 if (elf_sec->linked_to)
10529 {
10530 Elf_Internal_Shdr *linked_hdr
10531 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10532 struct _arm_elf_section_data *linked_sec_arm_data
10533 = get_arm_elf_section_data (linked_hdr->bfd_section);
10534
10535 if (linked_sec_arm_data == NULL)
10536 continue;
10537
10538 /* Link this .ARM.exidx section back from the text section it
10539 describes. */
10540 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10541 }
10542 }
10543 }
10544
10545 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10546 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10547 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10548
10549 for (i = 0; i < num_text_sections; i++)
10550 {
10551 asection *sec = text_section_order[i];
10552 asection *exidx_sec;
10553 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10554 struct _arm_elf_section_data *exidx_arm_data;
10555 bfd_byte *contents = NULL;
10556 int deleted_exidx_bytes = 0;
10557 bfd_vma j;
10558 arm_unwind_table_edit *unwind_edit_head = NULL;
10559 arm_unwind_table_edit *unwind_edit_tail = NULL;
10560 Elf_Internal_Shdr *hdr;
10561 bfd *ibfd;
10562
10563 if (arm_data == NULL)
10564 continue;
10565
10566 exidx_sec = arm_data->u.text.arm_exidx_sec;
10567 if (exidx_sec == NULL)
10568 {
10569 /* Section has no unwind data. */
10570 if (last_unwind_type == 0 || !last_exidx_sec)
10571 continue;
10572
10573 /* Ignore zero sized sections. */
10574 if (sec->size == 0)
10575 continue;
10576
10577 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10578 last_unwind_type = 0;
10579 continue;
10580 }
10581
10582 /* Skip /DISCARD/ sections. */
10583 if (bfd_is_abs_section (exidx_sec->output_section))
10584 continue;
10585
10586 hdr = &elf_section_data (exidx_sec)->this_hdr;
10587 if (hdr->sh_type != SHT_ARM_EXIDX)
10588 continue;
10589
10590 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10591 if (exidx_arm_data == NULL)
10592 continue;
10593
10594 ibfd = exidx_sec->owner;
10595
10596 if (hdr->contents != NULL)
10597 contents = hdr->contents;
10598 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
10599 /* An error? */
10600 continue;
10601
10602 for (j = 0; j < hdr->sh_size; j += 8)
10603 {
10604 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10605 int unwind_type;
10606 int elide = 0;
10607
10608 /* An EXIDX_CANTUNWIND entry. */
10609 if (second_word == 1)
10610 {
10611 if (last_unwind_type == 0)
10612 elide = 1;
10613 unwind_type = 0;
10614 }
10615 /* Inlined unwinding data. Merge if equal to previous. */
10616 else if ((second_word & 0x80000000) != 0)
10617 {
10618 if (merge_exidx_entries
10619 && last_second_word == second_word && last_unwind_type == 1)
10620 elide = 1;
10621 unwind_type = 1;
10622 last_second_word = second_word;
10623 }
10624 /* Normal table entry. In theory we could merge these too,
10625 but duplicate entries are likely to be much less common. */
10626 else
10627 unwind_type = 2;
10628
10629 if (elide)
10630 {
10631 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10632 DELETE_EXIDX_ENTRY, NULL, j / 8);
10633
10634 deleted_exidx_bytes += 8;
10635 }
10636
10637 last_unwind_type = unwind_type;
10638 }
10639
10640 /* Free contents if we allocated it ourselves. */
10641 if (contents != hdr->contents)
10642 free (contents);
10643
10644 /* Record edits to be applied later (in elf32_arm_write_section). */
10645 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10646 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10647
10648 if (deleted_exidx_bytes > 0)
10649 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
10650
10651 last_exidx_sec = exidx_sec;
10652 last_text_sec = sec;
10653 }
10654
10655 /* Add terminating CANTUNWIND entry. */
10656 if (last_exidx_sec && last_unwind_type != 0)
10657 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10658
10659 return TRUE;
10660 }
10661
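/* Editorial sketch (guarded out): the second word of each 8-byte
   .ARM.exidx entry drives the classification in
   elf32_arm_fix_exidx_coverage above: 1 means EXIDX_CANTUNWIND, a value
   with bit 31 set is an inlined unwind opcode sequence, anything else is
   a prel31 pointer into .ARM.extab.  The names below are ours.  */
#if 0
enum exidx_entry_kind
{
  EXIDX_CANTUNWIND_ENTRY,	/* unwind_type 0 above.  */
  EXIDX_INLINE_ENTRY,		/* unwind_type 1 above.  */
  EXIDX_EXTAB_ENTRY		/* unwind_type 2 above.  */
};

static enum exidx_entry_kind
classify_exidx_entry (unsigned int second_word)
{
  if (second_word == 1)
    return EXIDX_CANTUNWIND_ENTRY;
  if ((second_word & 0x80000000u) != 0)
    return EXIDX_INLINE_ENTRY;
  return EXIDX_EXTAB_ENTRY;
}
#endif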
10662 static bfd_boolean
10663 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10664 bfd *ibfd, const char *name)
10665 {
10666 asection *sec, *osec;
10667
10668 sec = bfd_get_section_by_name (ibfd, name);
10669 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10670 return TRUE;
10671
10672 osec = sec->output_section;
10673 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10674 return TRUE;
10675
10676 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10677 sec->output_offset, sec->size))
10678 return FALSE;
10679
10680 return TRUE;
10681 }
10682
10683 static bfd_boolean
10684 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10685 {
10686 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10687 asection *sec, *osec;
10688
10689 if (globals == NULL)
10690 return FALSE;
10691
10692 /* Invoke the regular ELF backend linker to do all the work. */
10693 if (!bfd_elf_final_link (abfd, info))
10694 return FALSE;
10695
10696 /* Process stub sections (eg BE8 encoding, ...). */
10697 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10698 int i;
10699 for (i=0; i<htab->top_id; i++)
10700 {
10701 sec = htab->stub_group[i].stub_sec;
10702 /* Only process it once, in its link_sec slot. */
10703 if (sec && i == htab->stub_group[i].link_sec->id)
10704 {
10705 osec = sec->output_section;
10706 elf32_arm_write_section (abfd, info, sec, sec->contents);
10707 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10708 sec->output_offset, sec->size))
10709 return FALSE;
10710 }
10711 }
10712
10713 /* Write out any glue sections now that we have created all the
10714 stubs. */
10715 if (globals->bfd_of_glue_owner != NULL)
10716 {
10717 if (! elf32_arm_output_glue_section (info, abfd,
10718 globals->bfd_of_glue_owner,
10719 ARM2THUMB_GLUE_SECTION_NAME))
10720 return FALSE;
10721
10722 if (! elf32_arm_output_glue_section (info, abfd,
10723 globals->bfd_of_glue_owner,
10724 THUMB2ARM_GLUE_SECTION_NAME))
10725 return FALSE;
10726
10727 if (! elf32_arm_output_glue_section (info, abfd,
10728 globals->bfd_of_glue_owner,
10729 VFP11_ERRATUM_VENEER_SECTION_NAME))
10730 return FALSE;
10731
10732 if (! elf32_arm_output_glue_section (info, abfd,
10733 globals->bfd_of_glue_owner,
10734 ARM_BX_GLUE_SECTION_NAME))
10735 return FALSE;
10736 }
10737
10738 return TRUE;
10739 }
10740
10741 /* Set the right machine number. */
10742
10743 static bfd_boolean
10744 elf32_arm_object_p (bfd *abfd)
10745 {
10746 unsigned int mach;
10747
10748 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
10749
10750 if (mach != bfd_mach_arm_unknown)
10751 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
10752
10753 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
10754 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
10755
10756 else
10757 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
10758
10759 return TRUE;
10760 }
10761
10762 /* Function to keep ARM specific flags in the ELF header. */
10763
10764 static bfd_boolean
10765 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
10766 {
10767 if (elf_flags_init (abfd)
10768 && elf_elfheader (abfd)->e_flags != flags)
10769 {
10770 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
10771 {
10772 if (flags & EF_ARM_INTERWORK)
10773 (*_bfd_error_handler)
10774 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
10775 abfd);
10776 else
10777 _bfd_error_handler
10778 (_("Warning: Clearing the interworking flag of %B due to outside request"),
10779 abfd);
10780 }
10781 }
10782 else
10783 {
10784 elf_elfheader (abfd)->e_flags = flags;
10785 elf_flags_init (abfd) = TRUE;
10786 }
10787
10788 return TRUE;
10789 }
10790
10791 /* Copy backend specific data from one object module to another. */
10792
10793 static bfd_boolean
10794 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
10795 {
10796 flagword in_flags;
10797 flagword out_flags;
10798
10799 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10800 return TRUE;
10801
10802 in_flags = elf_elfheader (ibfd)->e_flags;
10803 out_flags = elf_elfheader (obfd)->e_flags;
10804
10805 if (elf_flags_init (obfd)
10806 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
10807 && in_flags != out_flags)
10808 {
10809 /* Cannot mix APCS26 and APCS32 code. */
10810 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10811 return FALSE;
10812
10813 /* Cannot mix float APCS and non-float APCS code. */
10814 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10815 return FALSE;
10816
10817 /* If the src and dest have different interworking flags
10818 then turn off the interworking bit. */
10819 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10820 {
10821 if (out_flags & EF_ARM_INTERWORK)
10822 _bfd_error_handler
10823 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
10824 obfd, ibfd);
10825
10826 in_flags &= ~EF_ARM_INTERWORK;
10827 }
10828
10829 /* Likewise for PIC, though don't warn for this case. */
10830 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
10831 in_flags &= ~EF_ARM_PIC;
10832 }
10833
10834 elf_elfheader (obfd)->e_flags = in_flags;
10835 elf_flags_init (obfd) = TRUE;
10836
10837 /* Also copy the EI_OSABI field. */
10838 elf_elfheader (obfd)->e_ident[EI_OSABI] =
10839 elf_elfheader (ibfd)->e_ident[EI_OSABI];
10840
10841 /* Copy object attributes. */
10842 _bfd_elf_copy_obj_attributes (ibfd, obfd);
10843
10844 return TRUE;
10845 }
10846
10847 /* Values for Tag_ABI_PCS_R9_use. */
10848 enum
10849 {
10850 AEABI_R9_V6,
10851 AEABI_R9_SB,
10852 AEABI_R9_TLS,
10853 AEABI_R9_unused
10854 };
10855
10856 /* Values for Tag_ABI_PCS_RW_data. */
10857 enum
10858 {
10859 AEABI_PCS_RW_data_absolute,
10860 AEABI_PCS_RW_data_PCrel,
10861 AEABI_PCS_RW_data_SBrel,
10862 AEABI_PCS_RW_data_unused
10863 };
10864
10865 /* Values for Tag_ABI_enum_size. */
10866 enum
10867 {
10868 AEABI_enum_unused,
10869 AEABI_enum_short,
10870 AEABI_enum_wide,
10871 AEABI_enum_forced_wide
10872 };
10873
10874 /* Determine whether an object attribute tag takes an integer, a
10875 string or both. */
10876
10877 static int
10878 elf32_arm_obj_attrs_arg_type (int tag)
10879 {
10880 if (tag == Tag_compatibility)
10881 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
10882 else if (tag == Tag_nodefaults)
10883 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
10884 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
10885 return ATTR_TYPE_FLAG_STR_VAL;
10886 else if (tag < 32)
10887 return ATTR_TYPE_FLAG_INT_VAL;
10888 else
10889 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
10890 }
10891
10892 /* The ABI defines that Tag_conformance should be emitted first, and that
10893 Tag_nodefaults should be second (if either is defined). This sets those
10894 two positions, and bumps up the position of all the remaining tags to
10895 compensate. */
10896 static int
10897 elf32_arm_obj_attrs_order (int num)
10898 {
10899 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
10900 return Tag_conformance;
10901 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
10902 return Tag_nodefaults;
10903 if ((num - 2) < Tag_nodefaults)
10904 return num - 2;
10905 if ((num - 1) < Tag_conformance)
10906 return num - 1;
10907 return num;
10908 }
10909
10910 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10911 static bfd_boolean
10912 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
10913 {
10914 if ((tag & 127) < 64)
10915 {
10916 _bfd_error_handler
10917 (_("%B: Unknown mandatory EABI object attribute %d"),
10918 abfd, tag);
10919 bfd_set_error (bfd_error_bad_value);
10920 return FALSE;
10921 }
10922 else
10923 {
10924 _bfd_error_handler
10925 (_("Warning: %B: Unknown EABI object attribute %d"),
10926 abfd, tag);
10927 return TRUE;
10928 }
10929 }
10930
10931 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
10932 Returns -1 if no architecture could be read. */
10933
10934 static int
10935 get_secondary_compatible_arch (bfd *abfd)
10936 {
10937 obj_attribute *attr =
10938 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
10939
10940 /* Note: the tag and its argument below are uleb128 values, though
10941 currently-defined values fit in one byte for each. */
10942 if (attr->s
10943 && attr->s[0] == Tag_CPU_arch
10944 && (attr->s[1] & 128) != 128
10945 && attr->s[2] == 0)
10946 return attr->s[1];
10947
10948 /* This tag is "safely ignorable", so don't complain if it looks funny. */
10949 return -1;
10950 }
10951
10952 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
10953 The tag is removed if ARCH is -1. */
10954
10955 static void
10956 set_secondary_compatible_arch (bfd *abfd, int arch)
10957 {
10958 obj_attribute *attr =
10959 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
10960
10961 if (arch == -1)
10962 {
10963 attr->s = NULL;
10964 return;
10965 }
10966
10967 /* Note: the tag and its argument below are uleb128 values, though
10968 currently-defined values fit in one byte for each. */
10969 if (!attr->s)
10970 attr->s = (char *) bfd_alloc (abfd, 3);
10971 attr->s[0] = Tag_CPU_arch;
10972 attr->s[1] = arch;
10973 attr->s[2] = '\0';
10974 }
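/* For example, a call such as
set_secondary_compatible_arch (obfd, TAG_CPU_ARCH_V6_M)
leaves attr->s pointing at the three bytes
{ Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }, i.e. a single
uleb128-encoded (tag, value) pair followed by a terminating NUL,
which is exactly the form get_secondary_compatible_arch above
accepts. */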
10975
10976 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
10977 into account. */
10978
10979 static int
10980 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
10981 int newtag, int secondary_compat)
10982 {
10983 #define T(X) TAG_CPU_ARCH_##X
10984 int tagl, tagh, result;
10985 const int v6t2[] =
10986 {
10987 T(V6T2), /* PRE_V4. */
10988 T(V6T2), /* V4. */
10989 T(V6T2), /* V4T. */
10990 T(V6T2), /* V5T. */
10991 T(V6T2), /* V5TE. */
10992 T(V6T2), /* V5TEJ. */
10993 T(V6T2), /* V6. */
10994 T(V7), /* V6KZ. */
10995 T(V6T2) /* V6T2. */
10996 };
10997 const int v6k[] =
10998 {
10999 T(V6K), /* PRE_V4. */
11000 T(V6K), /* V4. */
11001 T(V6K), /* V4T. */
11002 T(V6K), /* V5T. */
11003 T(V6K), /* V5TE. */
11004 T(V6K), /* V5TEJ. */
11005 T(V6K), /* V6. */
11006 T(V6KZ), /* V6KZ. */
11007 T(V7), /* V6T2. */
11008 T(V6K) /* V6K. */
11009 };
11010 const int v7[] =
11011 {
11012 T(V7), /* PRE_V4. */
11013 T(V7), /* V4. */
11014 T(V7), /* V4T. */
11015 T(V7), /* V5T. */
11016 T(V7), /* V5TE. */
11017 T(V7), /* V5TEJ. */
11018 T(V7), /* V6. */
11019 T(V7), /* V6KZ. */
11020 T(V7), /* V6T2. */
11021 T(V7), /* V6K. */
11022 T(V7) /* V7. */
11023 };
11024 const int v6_m[] =
11025 {
11026 -1, /* PRE_V4. */
11027 -1, /* V4. */
11028 T(V6K), /* V4T. */
11029 T(V6K), /* V5T. */
11030 T(V6K), /* V5TE. */
11031 T(V6K), /* V5TEJ. */
11032 T(V6K), /* V6. */
11033 T(V6KZ), /* V6KZ. */
11034 T(V7), /* V6T2. */
11035 T(V6K), /* V6K. */
11036 T(V7), /* V7. */
11037 T(V6_M) /* V6_M. */
11038 };
11039 const int v6s_m[] =
11040 {
11041 -1, /* PRE_V4. */
11042 -1, /* V4. */
11043 T(V6K), /* V4T. */
11044 T(V6K), /* V5T. */
11045 T(V6K), /* V5TE. */
11046 T(V6K), /* V5TEJ. */
11047 T(V6K), /* V6. */
11048 T(V6KZ), /* V6KZ. */
11049 T(V7), /* V6T2. */
11050 T(V6K), /* V6K. */
11051 T(V7), /* V7. */
11052 T(V6S_M), /* V6_M. */
11053 T(V6S_M) /* V6S_M. */
11054 };
11055 const int v7e_m[] =
11056 {
11057 -1, /* PRE_V4. */
11058 -1, /* V4. */
11059 T(V7E_M), /* V4T. */
11060 T(V7E_M), /* V5T. */
11061 T(V7E_M), /* V5TE. */
11062 T(V7E_M), /* V5TEJ. */
11063 T(V7E_M), /* V6. */
11064 T(V7E_M), /* V6KZ. */
11065 T(V7E_M), /* V6T2. */
11066 T(V7E_M), /* V6K. */
11067 T(V7E_M), /* V7. */
11068 T(V7E_M), /* V6_M. */
11069 T(V7E_M), /* V6S_M. */
11070 T(V7E_M) /* V7E_M. */
11071 };
11072 const int v4t_plus_v6_m[] =
11073 {
11074 -1, /* PRE_V4. */
11075 -1, /* V4. */
11076 T(V4T), /* V4T. */
11077 T(V5T), /* V5T. */
11078 T(V5TE), /* V5TE. */
11079 T(V5TEJ), /* V5TEJ. */
11080 T(V6), /* V6. */
11081 T(V6KZ), /* V6KZ. */
11082 T(V6T2), /* V6T2. */
11083 T(V6K), /* V6K. */
11084 T(V7), /* V7. */
11085 T(V6_M), /* V6_M. */
11086 T(V6S_M), /* V6S_M. */
11087 T(V7E_M), /* V7E_M. */
11088 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11089 };
11090 const int *comb[] =
11091 {
11092 v6t2,
11093 v6k,
11094 v7,
11095 v6_m,
11096 v6s_m,
11097 v7e_m,
11098 /* Pseudo-architecture. */
11099 v4t_plus_v6_m
11100 };
11101
11102 /* Check we've not got a higher architecture than we know about. */
11103
11104 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11105 {
11106 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11107 return -1;
11108 }
11109
11110 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11111
11112 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11113 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11114 oldtag = T(V4T_PLUS_V6_M);
11115
11116 /* And override the new tag if we have a Tag_also_compatible_with on the
11117 input. */
11118
11119 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11120 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11121 newtag = T(V4T_PLUS_V6_M);
11122
11123 tagl = (oldtag < newtag) ? oldtag : newtag;
11124 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11125
11126 /* Architectures before V6KZ add features monotonically. */
11127 if (tagh <= TAG_CPU_ARCH_V6KZ)
11128 return result;
11129
11130 result = comb[tagh - T(V6T2)][tagl];
11131
11132 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11133 as the canonical version. */
11134 if (result == T(V4T_PLUS_V6_M))
11135 {
11136 result = T(V4T);
11137 *secondary_compat_out = T(V6_M);
11138 }
11139 else
11140 *secondary_compat_out = -1;
11141
11142 if (result == -1)
11143 {
11144 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11145 ibfd, oldtag, newtag);
11146 return -1;
11147 }
11148
11149 return result;
11150 #undef T
11151 }
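/* A couple of worked examples of the tables above: combining
Tag_CPU_arch values V6K and V6T2 indexes the v6k row at the V6T2
column and yields V7; combining V7E_M with V5TE yields V7E_M.
Anything at or below V6KZ simply takes the larger of the two tags,
since those architectures only ever add features. The
pseudo-architecture V4T_PLUS_V6_M is never written out directly;
it is canonicalised back to Tag_CPU_arch = V4T plus
Tag_also_compatible_with (V6_M). */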
11152
11153 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11154 are conflicting attributes. */
11155
11156 static bfd_boolean
11157 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11158 {
11159 obj_attribute *in_attr;
11160 obj_attribute *out_attr;
11161 /* Some tags have 0 = don't care, 1 = strong requirement,
11162 2 = weak requirement. */
11163 static const int order_021[3] = {0, 2, 1};
11164 int i;
11165 bfd_boolean result = TRUE;
11166
11167 /* Skip the linker stubs file. This preserves previous behavior
11168 of accepting unknown attributes in the first input file - but
11169 is that a bug? */
11170 if (ibfd->flags & BFD_LINKER_CREATED)
11171 return TRUE;
11172
11173 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11174 {
11175 /* This is the first object. Copy the attributes. */
11176 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11177
11178 out_attr = elf_known_obj_attributes_proc (obfd);
11179
11180 /* Use the Tag_null value to indicate the attributes have been
11181 initialized. */
11182 out_attr[0].i = 1;
11183
11184 /* We do not output objects with Tag_MPextension_use_legacy - we move
11185 the attribute's value to Tag_MPextension_use. */
11186 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11187 {
11188 if (out_attr[Tag_MPextension_use].i != 0
11189 && out_attr[Tag_MPextension_use_legacy].i
11190 != out_attr[Tag_MPextension_use].i)
11191 {
11192 _bfd_error_handler
11193 (_("Error: %B has both the current and legacy "
11194 "Tag_MPextension_use attributes"), ibfd);
11195 result = FALSE;
11196 }
11197
11198 out_attr[Tag_MPextension_use] =
11199 out_attr[Tag_MPextension_use_legacy];
11200 out_attr[Tag_MPextension_use_legacy].type = 0;
11201 out_attr[Tag_MPextension_use_legacy].i = 0;
11202 }
11203
11204 return result;
11205 }
11206
11207 in_attr = elf_known_obj_attributes_proc (ibfd);
11208 out_attr = elf_known_obj_attributes_proc (obfd);
11209 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11210 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11211 {
11212 /* Ignore mismatches if the object doesn't use floating point. */
11213 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11214 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11215 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11216 {
11217 _bfd_error_handler
11218 (_("error: %B uses VFP register arguments, %B does not"),
11219 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11220 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11221 result = FALSE;
11222 }
11223 }
11224
11225 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11226 {
11227 /* Merge this attribute with existing attributes. */
11228 switch (i)
11229 {
11230 case Tag_CPU_raw_name:
11231 case Tag_CPU_name:
11232 /* These are merged after Tag_CPU_arch. */
11233 break;
11234
11235 case Tag_ABI_optimization_goals:
11236 case Tag_ABI_FP_optimization_goals:
11237 /* Use the first value seen. */
11238 break;
11239
11240 case Tag_CPU_arch:
11241 {
11242 int secondary_compat = -1, secondary_compat_out = -1;
11243 unsigned int saved_out_attr = out_attr[i].i;
11244 static const char *name_table[] = {
11245 /* These aren't real CPU names, but we can't guess
11246 that from the architecture version alone. */
11247 "Pre v4",
11248 "ARM v4",
11249 "ARM v4T",
11250 "ARM v5T",
11251 "ARM v5TE",
11252 "ARM v5TEJ",
11253 "ARM v6",
11254 "ARM v6KZ",
11255 "ARM v6T2",
11256 "ARM v6K",
11257 "ARM v7",
11258 "ARM v6-M",
11259 "ARM v6S-M"
11260 };
11261
11262 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11263 secondary_compat = get_secondary_compatible_arch (ibfd);
11264 secondary_compat_out = get_secondary_compatible_arch (obfd);
11265 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11266 &secondary_compat_out,
11267 in_attr[i].i,
11268 secondary_compat);
11269 set_secondary_compatible_arch (obfd, secondary_compat_out);
11270
11271 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11272 if (out_attr[i].i == saved_out_attr)
11273 ; /* Leave the names alone. */
11274 else if (out_attr[i].i == in_attr[i].i)
11275 {
11276 /* The output architecture has been changed to match the
11277 input architecture. Use the input names. */
11278 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11279 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11280 : NULL;
11281 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11282 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11283 : NULL;
11284 }
11285 else
11286 {
11287 out_attr[Tag_CPU_name].s = NULL;
11288 out_attr[Tag_CPU_raw_name].s = NULL;
11289 }
11290
11291 /* If we still don't have a value for Tag_CPU_name,
11292 make one up now. Tag_CPU_raw_name remains blank. */
11293 if (out_attr[Tag_CPU_name].s == NULL
11294 && out_attr[i].i < ARRAY_SIZE (name_table))
11295 out_attr[Tag_CPU_name].s =
11296 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11297 }
11298 break;
11299
11300 case Tag_ARM_ISA_use:
11301 case Tag_THUMB_ISA_use:
11302 case Tag_WMMX_arch:
11303 case Tag_Advanced_SIMD_arch:
11304 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11305 case Tag_ABI_FP_rounding:
11306 case Tag_ABI_FP_exceptions:
11307 case Tag_ABI_FP_user_exceptions:
11308 case Tag_ABI_FP_number_model:
11309 case Tag_FP_HP_extension:
11310 case Tag_CPU_unaligned_access:
11311 case Tag_T2EE_use:
11312 case Tag_MPextension_use:
11313 /* Use the largest value specified. */
11314 if (in_attr[i].i > out_attr[i].i)
11315 out_attr[i].i = in_attr[i].i;
11316 break;
11317
11318 case Tag_ABI_align_preserved:
11319 case Tag_ABI_PCS_RO_data:
11320 /* Use the smallest value specified. */
11321 if (in_attr[i].i < out_attr[i].i)
11322 out_attr[i].i = in_attr[i].i;
11323 break;
11324
11325 case Tag_ABI_align_needed:
11326 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11327 && (in_attr[Tag_ABI_align_preserved].i == 0
11328 || out_attr[Tag_ABI_align_preserved].i == 0))
11329 {
11330 /* This error message should be enabled once all non-conformant
11331 binaries in the toolchain have had the attributes set
11332 properly.
11333 _bfd_error_handler
11334 (_("error: %B: 8-byte data alignment conflicts with %B"),
11335 obfd, ibfd);
11336 result = FALSE; */
11337 }
11338 /* Fall through. */
11339 case Tag_ABI_FP_denormal:
11340 case Tag_ABI_PCS_GOT_use:
11341 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11342 value if greater than 2 (for future-proofing). */
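/* For example, merging 1 with 2 yields 1, since order_021[1] == 2
outranks order_021[2] == 1, while merging 0 with any value yields
that other value. */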
11343 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11344 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11345 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11346 out_attr[i].i = in_attr[i].i;
11347 break;
11348
11349 case Tag_Virtualization_use:
11350 /* The virtualization tag effectively stores two bits of
11351 information: the intended use of TrustZone (in bit 0), and the
11352 intended use of Virtualization (in bit 1). */
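/* So, for example, merging an input value of 1 (TrustZone only)
with an output value of 2 (virtualization only) produces 3 (both);
a value above 3 can only be merged with zero or with an identical
value. */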
11353 if (out_attr[i].i == 0)
11354 out_attr[i].i = in_attr[i].i;
11355 else if (in_attr[i].i != 0
11356 && in_attr[i].i != out_attr[i].i)
11357 {
11358 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11359 out_attr[i].i = 3;
11360 else
11361 {
11362 _bfd_error_handler
11363 (_("error: %B: unable to merge virtualization attributes "
11364 "with %B"),
11365 obfd, ibfd);
11366 result = FALSE;
11367 }
11368 }
11369 break;
11370
11371 case Tag_CPU_arch_profile:
11372 if (out_attr[i].i != in_attr[i].i)
11373 {
11374 /* 0 will merge with anything.
11375 'A' and 'S' merge to 'A'.
11376 'R' and 'S' merge to 'R'.
11377 'M' and 'A|R|S' is an error. */
11378 if (out_attr[i].i == 0
11379 || (out_attr[i].i == 'S'
11380 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11381 out_attr[i].i = in_attr[i].i;
11382 else if (in_attr[i].i == 0
11383 || (in_attr[i].i == 'S'
11384 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11385 ; /* Do nothing. */
11386 else
11387 {
11388 _bfd_error_handler
11389 (_("error: %B: Conflicting architecture profiles %c/%c"),
11390 ibfd,
11391 in_attr[i].i ? in_attr[i].i : '0',
11392 out_attr[i].i ? out_attr[i].i : '0');
11393 result = FALSE;
11394 }
11395 }
11396 break;
11397 case Tag_FP_arch:
11398 {
11399 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11400 the meaning of a zero Tag_ABI_HardFP_use depends on
11401 Tag_FP_arch: it means absence of FP hardware if Tag_FP_arch
11402 is zero, otherwise it is effectively SP + DP. */
11403
11404 static const struct
11405 {
11406 int ver;
11407 int regs;
11408 } vfp_versions[7] =
11409 {
11410 {0, 0},
11411 {1, 16},
11412 {2, 16},
11413 {3, 32},
11414 {3, 16},
11415 {4, 32},
11416 {4, 16}
11417 };
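/* For example, merging Tag_FP_arch values 2 ({2, 16}) and 4
({3, 16}) gives the superset {3, 16}, which maps back to value 4;
merging 2 with 3 ({3, 32}) gives {3, 32}, i.e. value 3. */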
11418 int ver;
11419 int regs;
11420 int newval;
11421
11422 /* If the output has no requirement about FP hardware,
11423 follow the requirement of the input. */
11424 if (out_attr[i].i == 0)
11425 {
11426 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11427 out_attr[i].i = in_attr[i].i;
11428 out_attr[Tag_ABI_HardFP_use].i
11429 = in_attr[Tag_ABI_HardFP_use].i;
11430 break;
11431 }
11432 /* If the input has no requirement about FP hardware, do
11433 nothing. */
11434 else if (in_attr[i].i == 0)
11435 {
11436 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11437 break;
11438 }
11439
11440 /* Both the input and the output have nonzero Tag_FP_arch.
11441 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11442
11443 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11444 do nothing. */
11445 if (in_attr[Tag_ABI_HardFP_use].i == 0
11446 && out_attr[Tag_ABI_HardFP_use].i == 0)
11447 ;
11448 /* If the input and the output have different Tag_ABI_HardFP_use,
11449 the combination of them is 3 (SP & DP). */
11450 else if (in_attr[Tag_ABI_HardFP_use].i
11451 != out_attr[Tag_ABI_HardFP_use].i)
11452 out_attr[Tag_ABI_HardFP_use].i = 3;
11453
11454 /* Now we can handle Tag_FP_arch. */
11455
11456 /* Values greater than 6 aren't defined, so just pick the
11457 biggest. */
11458 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
11459 {
11460 out_attr[i] = in_attr[i];
11461 break;
11462 }
11463 /* The output uses the superset of input features
11464 (ISA version) and registers. */
11465 ver = vfp_versions[in_attr[i].i].ver;
11466 if (ver < vfp_versions[out_attr[i].i].ver)
11467 ver = vfp_versions[out_attr[i].i].ver;
11468 regs = vfp_versions[in_attr[i].i].regs;
11469 if (regs < vfp_versions[out_attr[i].i].regs)
11470 regs = vfp_versions[out_attr[i].i].regs;
11471 /* This assumes all possible supersets are also valid
11472 options. */
11473 for (newval = 6; newval > 0; newval--)
11474 {
11475 if (regs == vfp_versions[newval].regs
11476 && ver == vfp_versions[newval].ver)
11477 break;
11478 }
11479 out_attr[i].i = newval;
11480 }
11481 break;
11482 case Tag_PCS_config:
11483 if (out_attr[i].i == 0)
11484 out_attr[i].i = in_attr[i].i;
11485 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
11486 {
11487 /* It's sometimes ok to mix different configs, so this is only
11488 a warning. */
11489 _bfd_error_handler
11490 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11491 }
11492 break;
11493 case Tag_ABI_PCS_R9_use:
11494 if (in_attr[i].i != out_attr[i].i
11495 && out_attr[i].i != AEABI_R9_unused
11496 && in_attr[i].i != AEABI_R9_unused)
11497 {
11498 _bfd_error_handler
11499 (_("error: %B: Conflicting use of R9"), ibfd);
11500 result = FALSE;
11501 }
11502 if (out_attr[i].i == AEABI_R9_unused)
11503 out_attr[i].i = in_attr[i].i;
11504 break;
11505 case Tag_ABI_PCS_RW_data:
11506 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11507 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11508 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11509 {
11510 _bfd_error_handler
11511 (_("error: %B: SB relative addressing conflicts with use of R9"),
11512 ibfd);
11513 result = FALSE;
11514 }
11515 /* Use the smallest value specified. */
11516 if (in_attr[i].i < out_attr[i].i)
11517 out_attr[i].i = in_attr[i].i;
11518 break;
11519 case Tag_ABI_PCS_wchar_t:
11520 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11521 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11522 {
11523 _bfd_error_handler
11524 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11525 ibfd, in_attr[i].i, out_attr[i].i);
11526 }
11527 else if (in_attr[i].i && !out_attr[i].i)
11528 out_attr[i].i = in_attr[i].i;
11529 break;
11530 case Tag_ABI_enum_size:
11531 if (in_attr[i].i != AEABI_enum_unused)
11532 {
11533 if (out_attr[i].i == AEABI_enum_unused
11534 || out_attr[i].i == AEABI_enum_forced_wide)
11535 {
11536 /* The existing object is compatible with anything.
11537 Use whatever requirements the new object has. */
11538 out_attr[i].i = in_attr[i].i;
11539 }
11540 else if (in_attr[i].i != AEABI_enum_forced_wide
11541 && out_attr[i].i != in_attr[i].i
11542 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11543 {
11544 static const char *aeabi_enum_names[] =
11545 { "", "variable-size", "32-bit", "" };
11546 const char *in_name =
11547 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11548 ? aeabi_enum_names[in_attr[i].i]
11549 : "<unknown>";
11550 const char *out_name =
11551 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11552 ? aeabi_enum_names[out_attr[i].i]
11553 : "<unknown>";
11554 _bfd_error_handler
11555 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11556 ibfd, in_name, out_name);
11557 }
11558 }
11559 break;
11560 case Tag_ABI_VFP_args:
11561 /* Aready done. */
11562 break;
11563 case Tag_ABI_WMMX_args:
11564 if (in_attr[i].i != out_attr[i].i)
11565 {
11566 _bfd_error_handler
11567 (_("error: %B uses iWMMXt register arguments, %B does not"),
11568 ibfd, obfd);
11569 result = FALSE;
11570 }
11571 break;
11572 case Tag_compatibility:
11573 /* Merged in target-independent code. */
11574 break;
11575 case Tag_ABI_HardFP_use:
11576 /* This is handled along with Tag_FP_arch. */
11577 break;
11578 case Tag_ABI_FP_16bit_format:
11579 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11580 {
11581 if (in_attr[i].i != out_attr[i].i)
11582 {
11583 _bfd_error_handler
11584 (_("error: fp16 format mismatch between %B and %B"),
11585 ibfd, obfd);
11586 result = FALSE;
11587 }
11588 }
11589 if (in_attr[i].i != 0)
11590 out_attr[i].i = in_attr[i].i;
11591 break;
11592
11593 case Tag_DIV_use:
11594 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
11595 mode on a v7-M or v7-R CPU; to one if we cannot use UDIV or
11596 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
11597 CPU. We merge as follows: if the input attribute's value is
11598 one, the output attribute's value remains unchanged. If the
11599 input attribute's value is zero or two and the output
11600 attribute's value is one, the output value is set to the input
11601 value; otherwise the output value must be the same as the
11602 input value. */
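/* Concretely: 1 merged with anything yields the other value, 0
merged with 2 (in either order) is diagnosed as a mismatch, and
equal values merge to themselves. */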
11603 if (in_attr[i].i != 1 && out_attr[i].i != 1)
11604 {
11605 if (in_attr[i].i != out_attr[i].i)
11606 {
11607 _bfd_error_handler
11608 (_("DIV usage mismatch between %B and %B"),
11609 ibfd, obfd);
11610 result = FALSE;
11611 }
11612 }
11613
11614 if (in_attr[i].i != 1)
11615 out_attr[i].i = in_attr[i].i;
11616
11617 break;
11618
11619 case Tag_MPextension_use_legacy:
11620 /* We don't output objects with Tag_MPextension_use_legacy - we
11621 move the value to Tag_MPextension_use. */
11622 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11623 {
11624 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11625 {
11626 _bfd_error_handler
11627 (_("%B has has both the current and legacy "
11628 "Tag_MPextension_use attributes"),
11629 ibfd);
11630 result = FALSE;
11631 }
11632 }
11633
11634 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11635 out_attr[Tag_MPextension_use] = in_attr[i];
11636
11637 break;
11638
11639 case Tag_nodefaults:
11640 /* This tag is set if it exists, but the value is unused (and is
11641 typically zero). We don't actually need to do anything here -
11642 the merge happens automatically when the type flags are merged
11643 below. */
11644 break;
11645 case Tag_also_compatible_with:
11646 /* Already done in Tag_CPU_arch. */
11647 break;
11648 case Tag_conformance:
11649 /* Keep the attribute if it matches. Throw it away otherwise.
11650 No attribute means no claim to conform. */
11651 if (!in_attr[i].s || !out_attr[i].s
11652 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11653 out_attr[i].s = NULL;
11654 break;
11655
11656 default:
11657 result
11658 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11659 }
11660
11661 /* If out_attr was copied from in_attr then it won't have a type yet. */
11662 if (in_attr[i].type && !out_attr[i].type)
11663 out_attr[i].type = in_attr[i].type;
11664 }
11665
11666 /* Merge Tag_compatibility attributes and any common GNU ones. */
11667 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11668 return FALSE;
11669
11670 /* Check for any attributes not known on ARM. */
11671 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11672
11673 return result;
11674 }
11675
11676
11677 /* Return TRUE if the two EABI versions are compatible. */
11678
11679 static bfd_boolean
11680 elf32_arm_versions_compatible (unsigned iver, unsigned over)
11681 {
11682 /* v4 and v5 are the same spec before and after it was released,
11683 so allow mixing them. */
11684 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
11685 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
11686 return TRUE;
11687
11688 return (iver == over);
11689 }
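/* For instance, version 4 against version 5 (in either order) is
accepted, as are identical versions; any other combination makes
this return FALSE. */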
11690
11691 /* Merge backend specific data from an object file to the output
11692 object file when linking. */
11693
11694 static bfd_boolean
11695 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
11696
11697 /* Display the flags field. */
11698
11699 static bfd_boolean
11700 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
11701 {
11702 FILE * file = (FILE *) ptr;
11703 unsigned long flags;
11704
11705 BFD_ASSERT (abfd != NULL && ptr != NULL);
11706
11707 /* Print normal ELF private data. */
11708 _bfd_elf_print_private_bfd_data (abfd, ptr);
11709
11710 flags = elf_elfheader (abfd)->e_flags;
11711 /* Ignore init flag - it may not be set, despite the flags field
11712 containing valid data. */
11713
11714 /* xgettext:c-format */
11715 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
11716
11717 switch (EF_ARM_EABI_VERSION (flags))
11718 {
11719 case EF_ARM_EABI_UNKNOWN:
11720 /* The following flag bits are GNU extensions and not part of the
11721 official ARM ELF extended ABI. Hence they are only decoded if
11722 the EABI version is not set. */
11723 if (flags & EF_ARM_INTERWORK)
11724 fprintf (file, _(" [interworking enabled]"));
11725
11726 if (flags & EF_ARM_APCS_26)
11727 fprintf (file, " [APCS-26]");
11728 else
11729 fprintf (file, " [APCS-32]");
11730
11731 if (flags & EF_ARM_VFP_FLOAT)
11732 fprintf (file, _(" [VFP float format]"));
11733 else if (flags & EF_ARM_MAVERICK_FLOAT)
11734 fprintf (file, _(" [Maverick float format]"));
11735 else
11736 fprintf (file, _(" [FPA float format]"));
11737
11738 if (flags & EF_ARM_APCS_FLOAT)
11739 fprintf (file, _(" [floats passed in float registers]"));
11740
11741 if (flags & EF_ARM_PIC)
11742 fprintf (file, _(" [position independent]"));
11743
11744 if (flags & EF_ARM_NEW_ABI)
11745 fprintf (file, _(" [new ABI]"));
11746
11747 if (flags & EF_ARM_OLD_ABI)
11748 fprintf (file, _(" [old ABI]"));
11749
11750 if (flags & EF_ARM_SOFT_FLOAT)
11751 fprintf (file, _(" [software FP]"));
11752
11753 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
11754 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
11755 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
11756 | EF_ARM_MAVERICK_FLOAT);
11757 break;
11758
11759 case EF_ARM_EABI_VER1:
11760 fprintf (file, _(" [Version1 EABI]"));
11761
11762 if (flags & EF_ARM_SYMSARESORTED)
11763 fprintf (file, _(" [sorted symbol table]"));
11764 else
11765 fprintf (file, _(" [unsorted symbol table]"));
11766
11767 flags &= ~ EF_ARM_SYMSARESORTED;
11768 break;
11769
11770 case EF_ARM_EABI_VER2:
11771 fprintf (file, _(" [Version2 EABI]"));
11772
11773 if (flags & EF_ARM_SYMSARESORTED)
11774 fprintf (file, _(" [sorted symbol table]"));
11775 else
11776 fprintf (file, _(" [unsorted symbol table]"));
11777
11778 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
11779 fprintf (file, _(" [dynamic symbols use segment index]"));
11780
11781 if (flags & EF_ARM_MAPSYMSFIRST)
11782 fprintf (file, _(" [mapping symbols precede others]"));
11783
11784 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
11785 | EF_ARM_MAPSYMSFIRST);
11786 break;
11787
11788 case EF_ARM_EABI_VER3:
11789 fprintf (file, _(" [Version3 EABI]"));
11790 break;
11791
11792 case EF_ARM_EABI_VER4:
11793 fprintf (file, _(" [Version4 EABI]"));
11794 goto eabi;
11795
11796 case EF_ARM_EABI_VER5:
11797 fprintf (file, _(" [Version5 EABI]"));
11798 eabi:
11799 if (flags & EF_ARM_BE8)
11800 fprintf (file, _(" [BE8]"));
11801
11802 if (flags & EF_ARM_LE8)
11803 fprintf (file, _(" [LE8]"));
11804
11805 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
11806 break;
11807
11808 default:
11809 fprintf (file, _(" <EABI version unrecognised>"));
11810 break;
11811 }
11812
11813 flags &= ~ EF_ARM_EABIMASK;
11814
11815 if (flags & EF_ARM_RELEXEC)
11816 fprintf (file, _(" [relocatable executable]"));
11817
11818 if (flags & EF_ARM_HASENTRY)
11819 fprintf (file, _(" [has entry point]"));
11820
11821 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
11822
11823 if (flags)
11824 fprintf (file, _("<Unrecognised flag bits set>"));
11825
11826 fputc ('\n', file);
11827
11828 return TRUE;
11829 }
11830
11831 static int
11832 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
11833 {
11834 switch (ELF_ST_TYPE (elf_sym->st_info))
11835 {
11836 case STT_ARM_TFUNC:
11837 return ELF_ST_TYPE (elf_sym->st_info);
11838
11839 case STT_ARM_16BIT:
11840 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
11841 This allows us to distinguish between data used by Thumb instructions
11842 and non-data (which is probably code) inside Thumb regions of an
11843 executable. */
11844 if (type != STT_OBJECT && type != STT_TLS)
11845 return ELF_ST_TYPE (elf_sym->st_info);
11846 break;
11847
11848 default:
11849 break;
11850 }
11851
11852 return type;
11853 }
11854
11855 static asection *
11856 elf32_arm_gc_mark_hook (asection *sec,
11857 struct bfd_link_info *info,
11858 Elf_Internal_Rela *rel,
11859 struct elf_link_hash_entry *h,
11860 Elf_Internal_Sym *sym)
11861 {
11862 if (h != NULL)
11863 switch (ELF32_R_TYPE (rel->r_info))
11864 {
11865 case R_ARM_GNU_VTINHERIT:
11866 case R_ARM_GNU_VTENTRY:
11867 return NULL;
11868 }
11869
11870 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
11871 }
11872
11873 /* Update the got entry reference counts for the section being removed. */
11874
11875 static bfd_boolean
11876 elf32_arm_gc_sweep_hook (bfd * abfd,
11877 struct bfd_link_info * info,
11878 asection * sec,
11879 const Elf_Internal_Rela * relocs)
11880 {
11881 Elf_Internal_Shdr *symtab_hdr;
11882 struct elf_link_hash_entry **sym_hashes;
11883 bfd_signed_vma *local_got_refcounts;
11884 const Elf_Internal_Rela *rel, *relend;
11885 struct elf32_arm_link_hash_table * globals;
11886
11887 if (info->relocatable)
11888 return TRUE;
11889
11890 globals = elf32_arm_hash_table (info);
11891 if (globals == NULL)
11892 return FALSE;
11893
11894 elf_section_data (sec)->local_dynrel = NULL;
11895
11896 symtab_hdr = & elf_symtab_hdr (abfd);
11897 sym_hashes = elf_sym_hashes (abfd);
11898 local_got_refcounts = elf_local_got_refcounts (abfd);
11899
11900 check_use_blx (globals);
11901
11902 relend = relocs + sec->reloc_count;
11903 for (rel = relocs; rel < relend; rel++)
11904 {
11905 unsigned long r_symndx;
11906 struct elf_link_hash_entry *h = NULL;
11907 struct elf32_arm_link_hash_entry *eh;
11908 int r_type;
11909 bfd_boolean call_reloc_p;
11910 bfd_boolean may_become_dynamic_p;
11911 bfd_boolean may_need_local_target_p;
11912 union gotplt_union *root_plt;
11913 struct arm_plt_info *arm_plt;
11914
11915 r_symndx = ELF32_R_SYM (rel->r_info);
11916 if (r_symndx >= symtab_hdr->sh_info)
11917 {
11918 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
11919 while (h->root.type == bfd_link_hash_indirect
11920 || h->root.type == bfd_link_hash_warning)
11921 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11922 }
11923 eh = (struct elf32_arm_link_hash_entry *) h;
11924
11925 call_reloc_p = FALSE;
11926 may_become_dynamic_p = FALSE;
11927 may_need_local_target_p = FALSE;
11928
11929 r_type = ELF32_R_TYPE (rel->r_info);
11930 r_type = arm_real_reloc_type (globals, r_type);
11931 switch (r_type)
11932 {
11933 case R_ARM_GOT32:
11934 case R_ARM_GOT_PREL:
11935 case R_ARM_TLS_GD32:
11936 case R_ARM_TLS_IE32:
11937 if (h != NULL)
11938 {
11939 if (h->got.refcount > 0)
11940 h->got.refcount -= 1;
11941 }
11942 else if (local_got_refcounts != NULL)
11943 {
11944 if (local_got_refcounts[r_symndx] > 0)
11945 local_got_refcounts[r_symndx] -= 1;
11946 }
11947 break;
11948
11949 case R_ARM_TLS_LDM32:
11950 globals->tls_ldm_got.refcount -= 1;
11951 break;
11952
11953 case R_ARM_PC24:
11954 case R_ARM_PLT32:
11955 case R_ARM_CALL:
11956 case R_ARM_JUMP24:
11957 case R_ARM_PREL31:
11958 case R_ARM_THM_CALL:
11959 case R_ARM_THM_JUMP24:
11960 case R_ARM_THM_JUMP19:
11961 call_reloc_p = TRUE;
11962 may_need_local_target_p = TRUE;
11963 break;
11964
11965 case R_ARM_ABS12:
11966 if (!globals->vxworks_p)
11967 {
11968 may_need_local_target_p = TRUE;
11969 break;
11970 }
11971 /* Fall through. */
11972 case R_ARM_ABS32:
11973 case R_ARM_ABS32_NOI:
11974 case R_ARM_REL32:
11975 case R_ARM_REL32_NOI:
11976 case R_ARM_MOVW_ABS_NC:
11977 case R_ARM_MOVT_ABS:
11978 case R_ARM_MOVW_PREL_NC:
11979 case R_ARM_MOVT_PREL:
11980 case R_ARM_THM_MOVW_ABS_NC:
11981 case R_ARM_THM_MOVT_ABS:
11982 case R_ARM_THM_MOVW_PREL_NC:
11983 case R_ARM_THM_MOVT_PREL:
11984 /* Should the interworking branches be here also? */
11985 if ((info->shared || globals->root.is_relocatable_executable)
11986 && (sec->flags & SEC_ALLOC) != 0)
11987 {
11988 if (h == NULL
11989 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
11990 {
11991 call_reloc_p = TRUE;
11992 may_need_local_target_p = TRUE;
11993 }
11994 else
11995 may_become_dynamic_p = TRUE;
11996 }
11997 else
11998 may_need_local_target_p = TRUE;
11999 break;
12000
12001 default:
12002 break;
12003 }
12004
12005 if (may_need_local_target_p
12006 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12007 {
12008 BFD_ASSERT (root_plt->refcount > 0);
12009 root_plt->refcount -= 1;
12010
12011 if (!call_reloc_p)
12012 arm_plt->noncall_refcount--;
12013
12014 if (r_type == R_ARM_THM_CALL)
12015 arm_plt->maybe_thumb_refcount--;
12016
12017 if (r_type == R_ARM_THM_JUMP24
12018 || r_type == R_ARM_THM_JUMP19)
12019 arm_plt->thumb_refcount--;
12020 }
12021
12022 if (may_become_dynamic_p)
12023 {
12024 struct elf_dyn_relocs **pp;
12025 struct elf_dyn_relocs *p;
12026
12027 if (h != NULL)
12028 pp = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12029 else
12030 {
12031 Elf_Internal_Sym *isym;
12032
12033 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12034 abfd, r_symndx);
12035 if (isym == NULL)
12036 return FALSE;
12037 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12038 if (pp == NULL)
12039 return FALSE;
12040 }
12041 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
12042 if (p->sec == sec)
12043 {
12044 /* Everything must go for SEC. */
12045 *pp = p->next;
12046 break;
12047 }
12048 }
12049 }
12050
12051 return TRUE;
12052 }
12053
12054 /* Look through the relocs for a section during the first phase. */
12055
12056 static bfd_boolean
12057 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12058 asection *sec, const Elf_Internal_Rela *relocs)
12059 {
12060 Elf_Internal_Shdr *symtab_hdr;
12061 struct elf_link_hash_entry **sym_hashes;
12062 const Elf_Internal_Rela *rel;
12063 const Elf_Internal_Rela *rel_end;
12064 bfd *dynobj;
12065 asection *sreloc;
12066 struct elf32_arm_link_hash_table *htab;
12067 bfd_boolean call_reloc_p;
12068 bfd_boolean may_become_dynamic_p;
12069 bfd_boolean may_need_local_target_p;
12070 unsigned long nsyms;
12071
12072 if (info->relocatable)
12073 return TRUE;
12074
12075 BFD_ASSERT (is_arm_elf (abfd));
12076
12077 htab = elf32_arm_hash_table (info);
12078 if (htab == NULL)
12079 return FALSE;
12080
12081 sreloc = NULL;
12082
12083 /* Create dynamic sections for relocatable executables so that we can
12084 copy relocations. */
12085 if (htab->root.is_relocatable_executable
12086 && ! htab->root.dynamic_sections_created)
12087 {
12088 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12089 return FALSE;
12090 }
12091
12092 if (htab->root.dynobj == NULL)
12093 htab->root.dynobj = abfd;
12094 if (!create_ifunc_sections (info))
12095 return FALSE;
12096
12097 dynobj = htab->root.dynobj;
12098
12099 symtab_hdr = & elf_symtab_hdr (abfd);
12100 sym_hashes = elf_sym_hashes (abfd);
12101 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12102
12103 rel_end = relocs + sec->reloc_count;
12104 for (rel = relocs; rel < rel_end; rel++)
12105 {
12106 Elf_Internal_Sym *isym;
12107 struct elf_link_hash_entry *h;
12108 struct elf32_arm_link_hash_entry *eh;
12109 unsigned long r_symndx;
12110 int r_type;
12111
12112 r_symndx = ELF32_R_SYM (rel->r_info);
12113 r_type = ELF32_R_TYPE (rel->r_info);
12114 r_type = arm_real_reloc_type (htab, r_type);
12115
12116 if (r_symndx >= nsyms
12117 /* PR 9934: It is possible to have relocations that do not
12118 refer to symbols, thus it is also possible to have an
12119 object file containing relocations but no symbol table. */
12120 && (r_symndx > STN_UNDEF || nsyms > 0))
12121 {
12122 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12123 r_symndx);
12124 return FALSE;
12125 }
12126
12127 h = NULL;
12128 isym = NULL;
12129 if (nsyms > 0)
12130 {
12131 if (r_symndx < symtab_hdr->sh_info)
12132 {
12133 /* A local symbol. */
12134 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12135 abfd, r_symndx);
12136 if (isym == NULL)
12137 return FALSE;
12138 }
12139 else
12140 {
12141 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12142 while (h->root.type == bfd_link_hash_indirect
12143 || h->root.type == bfd_link_hash_warning)
12144 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12145 }
12146 }
12147
12148 eh = (struct elf32_arm_link_hash_entry *) h;
12149
12150 call_reloc_p = FALSE;
12151 may_become_dynamic_p = FALSE;
12152 may_need_local_target_p = FALSE;
12153
12154 /* Could be done earlier, if h were already available. */
12155 r_type = elf32_arm_tls_transition (info, r_type, h);
12156 switch (r_type)
12157 {
12158 case R_ARM_GOT32:
12159 case R_ARM_GOT_PREL:
12160 case R_ARM_TLS_GD32:
12161 case R_ARM_TLS_IE32:
12162 case R_ARM_TLS_GOTDESC:
12163 case R_ARM_TLS_DESCSEQ:
12164 case R_ARM_THM_TLS_DESCSEQ:
12165 case R_ARM_TLS_CALL:
12166 case R_ARM_THM_TLS_CALL:
12167 /* This symbol requires a global offset table entry. */
12168 {
12169 int tls_type, old_tls_type;
12170
12171 switch (r_type)
12172 {
12173 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12174
12175 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12176
12177 case R_ARM_TLS_GOTDESC:
12178 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12179 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12180 tls_type = GOT_TLS_GDESC; break;
12181
12182 default: tls_type = GOT_NORMAL; break;
12183 }
12184
12185 if (h != NULL)
12186 {
12187 h->got.refcount++;
12188 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12189 }
12190 else
12191 {
12192 /* This is a global offset table entry for a local symbol. */
12193 if (!elf32_arm_allocate_local_sym_info (abfd))
12194 return FALSE;
12195 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12196 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12197 }
12198
12199 /* If a variable is accessed with both tls methods, two
12200 slots may be created. */
12201 if (GOT_TLS_GD_ANY_P (old_tls_type)
12202 && GOT_TLS_GD_ANY_P (tls_type))
12203 tls_type |= old_tls_type;
12204
12205 /* We will already have issued an error message if there
12206 is a TLS/non-TLS mismatch, based on the symbol
12207 type. So just combine any TLS types needed. */
12208 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12209 && tls_type != GOT_NORMAL)
12210 tls_type |= old_tls_type;
12211
12212 /* If the symbol is accessed with both the IE and GDESC
12213 methods, we can relax GDESC to IE. Turn off the GDESC flag
12214 without disturbing any other TLS types that may be
12215 involved. */
12216 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12217 tls_type &= ~GOT_TLS_GDESC;
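/* For example, a symbol referenced through both R_ARM_TLS_IE32 and
R_ARM_TLS_GOTDESC ends up with plain GOT_TLS_IE here, so only the
IE-style GOT slot is allocated and the descriptor sequence can
later be relaxed to use it. */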
12218
12219 if (old_tls_type != tls_type)
12220 {
12221 if (h != NULL)
12222 elf32_arm_hash_entry (h)->tls_type = tls_type;
12223 else
12224 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12225 }
12226 }
12227 /* Fall through. */
12228
12229 case R_ARM_TLS_LDM32:
12230 if (r_type == R_ARM_TLS_LDM32)
12231 htab->tls_ldm_got.refcount++;
12232 /* Fall through. */
12233
12234 case R_ARM_GOTOFF32:
12235 case R_ARM_GOTPC:
12236 if (htab->root.sgot == NULL
12237 && !create_got_section (htab->root.dynobj, info))
12238 return FALSE;
12239 break;
12240
12241 case R_ARM_PC24:
12242 case R_ARM_PLT32:
12243 case R_ARM_CALL:
12244 case R_ARM_JUMP24:
12245 case R_ARM_PREL31:
12246 case R_ARM_THM_CALL:
12247 case R_ARM_THM_JUMP24:
12248 case R_ARM_THM_JUMP19:
12249 call_reloc_p = TRUE;
12250 may_need_local_target_p = TRUE;
12251 break;
12252
12253 case R_ARM_ABS12:
12254 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12255 ldr __GOTT_INDEX__ offsets. */
12256 if (!htab->vxworks_p)
12257 {
12258 may_need_local_target_p = TRUE;
12259 break;
12260 }
12261 /* Fall through. */
12262
12263 case R_ARM_MOVW_ABS_NC:
12264 case R_ARM_MOVT_ABS:
12265 case R_ARM_THM_MOVW_ABS_NC:
12266 case R_ARM_THM_MOVT_ABS:
12267 if (info->shared)
12268 {
12269 (*_bfd_error_handler)
12270 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12271 abfd, elf32_arm_howto_table_1[r_type].name,
12272 (h) ? h->root.root.string : "a local symbol");
12273 bfd_set_error (bfd_error_bad_value);
12274 return FALSE;
12275 }
12276
12277 /* Fall through. */
12278 case R_ARM_ABS32:
12279 case R_ARM_ABS32_NOI:
12280 case R_ARM_REL32:
12281 case R_ARM_REL32_NOI:
12282 case R_ARM_MOVW_PREL_NC:
12283 case R_ARM_MOVT_PREL:
12284 case R_ARM_THM_MOVW_PREL_NC:
12285 case R_ARM_THM_MOVT_PREL:
12286
12287 /* Should the interworking branches be listed here? */
12288 if ((info->shared || htab->root.is_relocatable_executable)
12289 && (sec->flags & SEC_ALLOC) != 0)
12290 {
12291 if (h == NULL
12292 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12293 {
12294 /* In shared libraries and relocatable executables,
12295 we treat local relative references as calls;
12296 see the related SYMBOL_CALLS_LOCAL code in
12297 allocate_dynrelocs. */
12298 call_reloc_p = TRUE;
12299 may_need_local_target_p = TRUE;
12300 }
12301 else
12302 /* We are creating a shared library or relocatable
12303 executable, and this is a reloc against a global symbol,
12304 or a non-PC-relative reloc against a local symbol.
12305 We may need to copy the reloc into the output. */
12306 may_become_dynamic_p = TRUE;
12307 }
12308 else
12309 may_need_local_target_p = TRUE;
12310 break;
12311
12312 /* This relocation describes the C++ object vtable hierarchy.
12313 Reconstruct it for later use during GC. */
12314 case R_ARM_GNU_VTINHERIT:
12315 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12316 return FALSE;
12317 break;
12318
12319 /* This relocation describes which C++ vtable entries are actually
12320 used. Record for later use during GC. */
12321 case R_ARM_GNU_VTENTRY:
12322 BFD_ASSERT (h != NULL);
12323 if (h != NULL
12324 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12325 return FALSE;
12326 break;
12327 }
12328
12329 if (h != NULL)
12330 {
12331 if (call_reloc_p)
12332 /* We may need a .plt entry if the function this reloc
12333 refers to is in a different object, regardless of the
12334 symbol's type. We can't tell for sure yet, because
12335 something later might force the symbol local. */
12336 h->needs_plt = 1;
12337 else if (may_need_local_target_p)
12338 /* If this reloc is in a read-only section, we might
12339 need a copy reloc. We can't check reliably at this
12340 stage whether the section is read-only, as input
12341 sections have not yet been mapped to output sections.
12342 Tentatively set the flag for now, and correct in
12343 adjust_dynamic_symbol. */
12344 h->non_got_ref = 1;
12345 }
12346
12347 if (may_need_local_target_p
12348 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12349 {
12350 union gotplt_union *root_plt;
12351 struct arm_plt_info *arm_plt;
12352 struct arm_local_iplt_info *local_iplt;
12353
12354 if (h != NULL)
12355 {
12356 root_plt = &h->plt;
12357 arm_plt = &eh->plt;
12358 }
12359 else
12360 {
12361 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12362 if (local_iplt == NULL)
12363 return FALSE;
12364 root_plt = &local_iplt->root;
12365 arm_plt = &local_iplt->arm;
12366 }
12367
12368 /* If the symbol is a function that doesn't bind locally,
12369 this relocation will need a PLT entry. */
12370 root_plt->refcount += 1;
12371
12372 if (!call_reloc_p)
12373 arm_plt->noncall_refcount++;
12374
12375 /* It's too early to use htab->use_blx here, so we have to
12376 record possible blx references separately from
12377 relocs that definitely need a thumb stub. */
12378
12379 if (r_type == R_ARM_THM_CALL)
12380 arm_plt->maybe_thumb_refcount += 1;
12381
12382 if (r_type == R_ARM_THM_JUMP24
12383 || r_type == R_ARM_THM_JUMP19)
12384 arm_plt->thumb_refcount += 1;
12385 }
12386
12387 if (may_become_dynamic_p)
12388 {
12389 struct elf_dyn_relocs *p, **head;
12390
12391 /* Create a reloc section in dynobj. */
12392 if (sreloc == NULL)
12393 {
12394 sreloc = _bfd_elf_make_dynamic_reloc_section
12395 (sec, dynobj, 2, abfd, ! htab->use_rel);
12396
12397 if (sreloc == NULL)
12398 return FALSE;
12399
12400 /* BPABI objects never have dynamic relocations mapped. */
12401 if (htab->symbian_p)
12402 {
12403 flagword flags;
12404
12405 flags = bfd_get_section_flags (dynobj, sreloc);
12406 flags &= ~(SEC_LOAD | SEC_ALLOC);
12407 bfd_set_section_flags (dynobj, sreloc, flags);
12408 }
12409 }
12410
12411 /* If this is a global symbol, count the number of
12412 relocations we need for this symbol. */
12413 if (h != NULL)
12414 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12415 else
12416 {
12417 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12418 if (head == NULL)
12419 return FALSE;
12420 }
12421
12422 p = *head;
12423 if (p == NULL || p->sec != sec)
12424 {
12425 bfd_size_type amt = sizeof *p;
12426
12427 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12428 if (p == NULL)
12429 return FALSE;
12430 p->next = *head;
12431 *head = p;
12432 p->sec = sec;
12433 p->count = 0;
12434 p->pc_count = 0;
12435 }
12436
12437 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12438 p->pc_count += 1;
12439 p->count += 1;
12440 }
12441 }
12442
12443 return TRUE;
12444 }
12445
12446 /* Unwinding tables are not referenced directly. This pass marks them as
12447 required if the corresponding code section is marked. */
12448
12449 static bfd_boolean
12450 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12451 elf_gc_mark_hook_fn gc_mark_hook)
12452 {
12453 bfd *sub;
12454 Elf_Internal_Shdr **elf_shdrp;
12455 bfd_boolean again;
12456
12457 /* Marking EH data may cause additional code sections to be marked,
12458 requiring multiple passes. */
12459 again = TRUE;
12460 while (again)
12461 {
12462 again = FALSE;
12463 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12464 {
12465 asection *o;
12466
12467 if (! is_arm_elf (sub))
12468 continue;
12469
12470 elf_shdrp = elf_elfsections (sub);
12471 for (o = sub->sections; o != NULL; o = o->next)
12472 {
12473 Elf_Internal_Shdr *hdr;
12474
12475 hdr = &elf_section_data (o)->this_hdr;
12476 if (hdr->sh_type == SHT_ARM_EXIDX
12477 && hdr->sh_link
12478 && hdr->sh_link < elf_numsections (sub)
12479 && !o->gc_mark
12480 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12481 {
12482 again = TRUE;
12483 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12484 return FALSE;
12485 }
12486 }
12487 }
12488 }
12489
12490 return TRUE;
12491 }
12492
12493 /* Treat mapping symbols as special target symbols. */
12494
12495 static bfd_boolean
12496 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12497 {
12498 return bfd_is_arm_special_symbol_name (sym->name,
12499 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12500 }
12501
12502 /* This is a copy of elf_find_function() from elf.c except that
12503 ARM mapping symbols are ignored when looking for function names
12504 and STT_ARM_TFUNC is considered to be a function type. */
12505
12506 static bfd_boolean
12507 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12508 asection * section,
12509 asymbol ** symbols,
12510 bfd_vma offset,
12511 const char ** filename_ptr,
12512 const char ** functionname_ptr)
12513 {
12514 const char * filename = NULL;
12515 asymbol * func = NULL;
12516 bfd_vma low_func = 0;
12517 asymbol ** p;
12518
12519 for (p = symbols; *p != NULL; p++)
12520 {
12521 elf_symbol_type *q;
12522
12523 q = (elf_symbol_type *) *p;
12524
12525 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12526 {
12527 default:
12528 break;
12529 case STT_FILE:
12530 filename = bfd_asymbol_name (&q->symbol);
12531 break;
12532 case STT_FUNC:
12533 case STT_ARM_TFUNC:
12534 case STT_NOTYPE:
12535 /* Skip mapping symbols. */
12536 if ((q->symbol.flags & BSF_LOCAL)
12537 && bfd_is_arm_special_symbol_name (q->symbol.name,
12538 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12539 continue;
12540 /* Fall through. */
12541 if (bfd_get_section (&q->symbol) == section
12542 && q->symbol.value >= low_func
12543 && q->symbol.value <= offset)
12544 {
12545 func = (asymbol *) q;
12546 low_func = q->symbol.value;
12547 }
12548 break;
12549 }
12550 }
12551
12552 if (func == NULL)
12553 return FALSE;
12554
12555 if (filename_ptr)
12556 *filename_ptr = filename;
12557 if (functionname_ptr)
12558 *functionname_ptr = bfd_asymbol_name (func);
12559
12560 return TRUE;
12561 }
12562
12563
12564 /* Find the nearest line to a particular section and offset, for error
12565 reporting. This code is a duplicate of the code in elf.c, except
12566 that it uses arm_elf_find_function. */
12567
12568 static bfd_boolean
12569 elf32_arm_find_nearest_line (bfd * abfd,
12570 asection * section,
12571 asymbol ** symbols,
12572 bfd_vma offset,
12573 const char ** filename_ptr,
12574 const char ** functionname_ptr,
12575 unsigned int * line_ptr)
12576 {
12577 bfd_boolean found = FALSE;
12578
12579 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12580
12581 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
12582 filename_ptr, functionname_ptr,
12583 line_ptr, 0,
12584 & elf_tdata (abfd)->dwarf2_find_line_info))
12585 {
12586 if (!*functionname_ptr)
12587 arm_elf_find_function (abfd, section, symbols, offset,
12588 *filename_ptr ? NULL : filename_ptr,
12589 functionname_ptr);
12590
12591 return TRUE;
12592 }
12593
12594 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12595 & found, filename_ptr,
12596 functionname_ptr, line_ptr,
12597 & elf_tdata (abfd)->line_info))
12598 return FALSE;
12599
12600 if (found && (*functionname_ptr || *line_ptr))
12601 return TRUE;
12602
12603 if (symbols == NULL)
12604 return FALSE;
12605
12606 if (! arm_elf_find_function (abfd, section, symbols, offset,
12607 filename_ptr, functionname_ptr))
12608 return FALSE;
12609
12610 *line_ptr = 0;
12611 return TRUE;
12612 }
12613
12614 static bfd_boolean
12615 elf32_arm_find_inliner_info (bfd * abfd,
12616 const char ** filename_ptr,
12617 const char ** functionname_ptr,
12618 unsigned int * line_ptr)
12619 {
12620 bfd_boolean found;
12621 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12622 functionname_ptr, line_ptr,
12623 & elf_tdata (abfd)->dwarf2_find_line_info);
12624 return found;
12625 }
12626
12627 /* Adjust a symbol defined by a dynamic object and referenced by a
12628 regular object. The current definition is in some section of the
12629 dynamic object, but we're not including those sections. We have to
12630 change the definition to something the rest of the link can
12631 understand. */
12632
12633 static bfd_boolean
12634 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12635 struct elf_link_hash_entry * h)
12636 {
12637 bfd * dynobj;
12638 asection * s;
12639 struct elf32_arm_link_hash_entry * eh;
12640 struct elf32_arm_link_hash_table *globals;
12641
12642 globals = elf32_arm_hash_table (info);
12643 if (globals == NULL)
12644 return FALSE;
12645
12646 dynobj = elf_hash_table (info)->dynobj;
12647
12648 /* Make sure we know what is going on here. */
12649 BFD_ASSERT (dynobj != NULL
12650 && (h->needs_plt
12651 || h->type == STT_GNU_IFUNC
12652 || h->u.weakdef != NULL
12653 || (h->def_dynamic
12654 && h->ref_regular
12655 && !h->def_regular)));
12656
12657 eh = (struct elf32_arm_link_hash_entry *) h;
12658
12659 /* If this is a function, put it in the procedure linkage table. We
12660 will fill in the contents of the procedure linkage table later,
12661 when we know the address of the .got section. */
12662 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
12663 {
12664 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
12665 symbol binds locally. */
12666 if (h->plt.refcount <= 0
12667 || (h->type != STT_GNU_IFUNC
12668 && (SYMBOL_CALLS_LOCAL (info, h)
12669 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
12670 && h->root.type == bfd_link_hash_undefweak))))
12671 {
12672 /* This case can occur if we saw a PLT32 reloc in an input
12673 file, but the symbol was never referred to by a dynamic
12674 object, or if all references were garbage collected. In
12675 such a case, we don't actually need to build a procedure
12676 linkage table, and we can just do a PC24 reloc instead. */
12677 h->plt.offset = (bfd_vma) -1;
12678 eh->plt.thumb_refcount = 0;
12679 eh->plt.maybe_thumb_refcount = 0;
12680 eh->plt.noncall_refcount = 0;
12681 h->needs_plt = 0;
12682 }
12683
12684 return TRUE;
12685 }
12686 else
12687 {
12688 /* It's possible that we incorrectly decided a .plt reloc was
12689 needed for an R_ARM_PC24 or similar reloc to a non-function sym
12690 in check_relocs. We can't decide accurately between function
12691 and non-function syms in check-relocs; Objects loaded later in
12692 the link may change h->type. So fix it now. */
12693 h->plt.offset = (bfd_vma) -1;
12694 eh->plt.thumb_refcount = 0;
12695 eh->plt.maybe_thumb_refcount = 0;
12696 eh->plt.noncall_refcount = 0;
12697 }
12698
12699 /* If this is a weak symbol, and there is a real definition, the
12700 processor independent code will have arranged for us to see the
12701 real definition first, and we can just use the same value. */
12702 if (h->u.weakdef != NULL)
12703 {
12704 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
12705 || h->u.weakdef->root.type == bfd_link_hash_defweak);
12706 h->root.u.def.section = h->u.weakdef->root.u.def.section;
12707 h->root.u.def.value = h->u.weakdef->root.u.def.value;
12708 return TRUE;
12709 }
12710
12711 /* If there are no non-GOT references, we do not need a copy
12712 relocation. */
12713 if (!h->non_got_ref)
12714 return TRUE;
12715
12716 /* This is a reference to a symbol defined by a dynamic object which
12717 is not a function. */
12718
12719 /* If we are creating a shared library, we must presume that the
12720 only references to the symbol are via the global offset table.
12721 For such cases we need not do anything here; the relocations will
12722 be handled correctly by relocate_section. Relocatable executables
12723 can reference data in shared objects directly, so we don't need to
12724 do anything here. */
12725 if (info->shared || globals->root.is_relocatable_executable)
12726 return TRUE;
12727
12728 if (h->size == 0)
12729 {
12730 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
12731 h->root.root.string);
12732 return TRUE;
12733 }
12734
12735 /* We must allocate the symbol in our .dynbss section, which will
12736 become part of the .bss section of the executable. There will be
12737 an entry for this symbol in the .dynsym section. The dynamic
12738 object will contain position independent code, so all references
12739 from the dynamic object to this symbol will go through the global
12740 offset table. The dynamic linker will use the .dynsym entry to
12741 determine the address it must put in the global offset table, so
12742 both the dynamic object and the regular object will refer to the
12743 same memory location for the variable. */
12744 s = bfd_get_section_by_name (dynobj, ".dynbss");
12745 BFD_ASSERT (s != NULL);
12746
12747 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
12748 copy the initial value out of the dynamic object and into the
12749 runtime process image. We need to remember the offset into the
12750 .rel(a).bss section we are going to use. */
12751 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
12752 {
12753 asection *srel;
12754
12755 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
12756 elf32_arm_allocate_dynrelocs (info, srel, 1);
12757 h->needs_copy = 1;
12758 }
12759
12760 return _bfd_elf_adjust_dynamic_copy (h, s);
12761 }
12762
12763 /* Allocate space in .plt, .got and associated reloc sections for
12764 dynamic relocs. */
12765
12766 static bfd_boolean
12767 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
12768 {
12769 struct bfd_link_info *info;
12770 struct elf32_arm_link_hash_table *htab;
12771 struct elf32_arm_link_hash_entry *eh;
12772 struct elf_dyn_relocs *p;
12773
12774 if (h->root.type == bfd_link_hash_indirect)
12775 return TRUE;
12776
12777 if (h->root.type == bfd_link_hash_warning)
12778 /* When warning symbols are created, they **replace** the "real"
12779 entry in the hash table, thus we never get to see the real
12780 symbol in a hash traversal. So look at it now. */
12781 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12782
12783 eh = (struct elf32_arm_link_hash_entry *) h;
12784
12785 info = (struct bfd_link_info *) inf;
12786 htab = elf32_arm_hash_table (info);
12787 if (htab == NULL)
12788 return FALSE;
12789
12790 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
12791 && h->plt.refcount > 0)
12792 {
12793 /* Make sure this symbol is output as a dynamic symbol.
12794 Undefined weak syms won't yet be marked as dynamic. */
12795 if (h->dynindx == -1
12796 && !h->forced_local)
12797 {
12798 if (! bfd_elf_link_record_dynamic_symbol (info, h))
12799 return FALSE;
12800 }
12801
12802 /* If the call in the PLT entry binds locally, the associated
12803 GOT entry should use an R_ARM_IRELATIVE relocation instead of
12804 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
12805 than the .plt section. */
12806 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
12807 {
12808 eh->is_iplt = 1;
12809 if (eh->plt.noncall_refcount == 0
12810 && SYMBOL_REFERENCES_LOCAL (info, h))
12811 /* All non-call references can be resolved directly.
12812 This means that they can (and in some cases, must)
12813 resolve directly to the run-time target, rather than
12814 to the PLT. That in turn means that any .got entry
12815 would be equal to the .igot.plt entry, so there's
12816 no point having both. */
12817 h->got.refcount = 0;
12818 }
12819
12820 if (info->shared
12821 || eh->is_iplt
12822 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
12823 {
12824 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
12825
12826 /* If this symbol is not defined in a regular file, and we are
12827 not generating a shared library, then set the symbol to this
12828 location in the .plt. This is required to make function
12829 pointers compare as equal between the normal executable and
12830 the shared library. */
12831 if (! info->shared
12832 && !h->def_regular)
12833 {
12834 h->root.u.def.section = htab->root.splt;
12835 h->root.u.def.value = h->plt.offset;
12836
12837 /* Make sure the function is not marked as Thumb, in case
12838 it is the target of an ABS32 relocation, which will
12839 point to the PLT entry. */
12840 h->target_internal = ST_BRANCH_TO_ARM;
12841 }
12842
12843 htab->next_tls_desc_index++;
12844
12845 /* VxWorks executables have a second set of relocations for
12846 each PLT entry. They go in a separate relocation section,
12847 which is processed by the kernel loader. */
12848 if (htab->vxworks_p && !info->shared)
12849 {
12850 /* There is a relocation for the initial PLT entry:
12851 an R_ARM_ABS32 relocation for _GLOBAL_OFFSET_TABLE_. */
12852 if (h->plt.offset == htab->plt_header_size)
12853 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
12854
12855 /* There are two extra relocations for each subsequent
12856 PLT entry: an R_ARM_ABS32 relocation for the GOT entry,
12857 and an R_ARM_ABS32 relocation for the PLT entry. */
12858 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
12859 }
12860 }
12861 else
12862 {
12863 h->plt.offset = (bfd_vma) -1;
12864 h->needs_plt = 0;
12865 }
12866 }
12867 else
12868 {
12869 h->plt.offset = (bfd_vma) -1;
12870 h->needs_plt = 0;
12871 }
12872
12873 eh = (struct elf32_arm_link_hash_entry *) h;
12874 eh->tlsdesc_got = (bfd_vma) -1;
12875
12876 if (h->got.refcount > 0)
12877 {
12878 asection *s;
12879 bfd_boolean dyn;
12880 int tls_type = elf32_arm_hash_entry (h)->tls_type;
12881 int indx;
12882
12883 /* Make sure this symbol is output as a dynamic symbol.
12884 Undefined weak syms won't yet be marked as dynamic. */
12885 if (h->dynindx == -1
12886 && !h->forced_local)
12887 {
12888 if (! bfd_elf_link_record_dynamic_symbol (info, h))
12889 return FALSE;
12890 }
12891
12892 if (!htab->symbian_p)
12893 {
12894 s = htab->root.sgot;
12895 h->got.offset = s->size;
12896
12897 if (tls_type == GOT_UNKNOWN)
12898 abort ();
12899
12900 if (tls_type == GOT_NORMAL)
12901 /* Non-TLS symbols need one GOT slot. */
12902 s->size += 4;
12903 else
12904 {
12905 if (tls_type & GOT_TLS_GDESC)
12906 {
12907 /* R_ARM_TLS_DESC needs 2 GOT slots. */
12908 eh->tlsdesc_got
12909 = (htab->root.sgotplt->size
12910 - elf32_arm_compute_jump_table_size (htab));
12911 htab->root.sgotplt->size += 8;
12912 h->got.offset = (bfd_vma) -2;
12913 /* plt.got_offset needs to know there's a TLS_DESC
12914 reloc in the middle of .got.plt. */
12915 htab->num_tls_desc++;
12916 }
12917
12918 if (tls_type & GOT_TLS_GD)
12919 {
12920 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
12921 the symbol is both GD and GDESC, got.offset may
12922 have been overwritten. */
12923 h->got.offset = s->size;
12924 s->size += 8;
12925 }
12926
12927 if (tls_type & GOT_TLS_IE)
12928 /* R_ARM_TLS_IE32 needs one GOT slot. */
12929 s->size += 4;
12930 }
12931
12932 dyn = htab->root.dynamic_sections_created;
12933
12934 indx = 0;
12935 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
12936 && (!info->shared
12937 || !SYMBOL_REFERENCES_LOCAL (info, h)))
12938 indx = h->dynindx;
12939
12940 if (tls_type != GOT_NORMAL
12941 && (info->shared || indx != 0)
12942 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
12943 || h->root.type != bfd_link_hash_undefweak))
12944 {
12945 if (tls_type & GOT_TLS_IE)
12946 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
12947
12948 if (tls_type & GOT_TLS_GD)
12949 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
12950
12951 if (tls_type & GOT_TLS_GDESC)
12952 {
12953 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
12954 /* GDESC needs a trampoline to jump to. */
12955 htab->tls_trampoline = -1;
12956 }
12957
12958 /* Only GD needs it. GDESC just emits one relocation per
12959 2 entries. */
12960 if ((tls_type & GOT_TLS_GD) && indx != 0)
12961 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
12962 }
12963 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
12964 {
12965 if (htab->root.dynamic_sections_created)
12966 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
12967 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
12968 }
12969 else if (h->type == STT_GNU_IFUNC
12970 && eh->plt.noncall_refcount == 0)
12971 /* No non-call references resolve to the STT_GNU_IFUNC's PLT entry;
12972 they all resolve dynamically instead. Reserve room for the
12973 GOT entry's R_ARM_IRELATIVE relocation. */
12974 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
12975 else if (info->shared)
12976 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
12977 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
12978 }
12979 }
12980 else
12981 h->got.offset = (bfd_vma) -1;
12982
12983 /* Allocate stubs for exported Thumb functions on v4t. */
12984 if (!htab->use_blx && h->dynindx != -1
12985 && h->def_regular
12986 && h->target_internal == ST_BRANCH_TO_THUMB
12987 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
12988 {
12989 struct elf_link_hash_entry * th;
12990 struct bfd_link_hash_entry * bh;
12991 struct elf_link_hash_entry * myh;
12992 char name[1024];
12993 asection *s;
12994 bh = NULL;
12995 /* Create a new symbol to register the real location of the function. */
12996 s = h->root.u.def.section;
12997 sprintf (name, "__real_%s", h->root.root.string);
12998 _bfd_generic_link_add_one_symbol (info, s->owner,
12999 name, BSF_GLOBAL, s,
13000 h->root.u.def.value,
13001 NULL, TRUE, FALSE, &bh);
13002
13003 myh = (struct elf_link_hash_entry *) bh;
13004 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13005 myh->forced_local = 1;
13006 myh->target_internal = ST_BRANCH_TO_THUMB;
13007 eh->export_glue = myh;
13008 th = record_arm_to_thumb_glue (info, h);
13009 /* Point the symbol at the stub. */
13010 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13011 h->target_internal = ST_BRANCH_TO_ARM;
13012 h->root.u.def.section = th->root.u.def.section;
13013 h->root.u.def.value = th->root.u.def.value & ~1;
13014 }
13015
13016 if (eh->dyn_relocs == NULL)
13017 return TRUE;
13018
13019 /* In the shared -Bsymbolic case, discard space allocated for
13020 dynamic pc-relative relocs against symbols which turn out to be
13021 defined in regular objects. For the normal shared case, discard
13022 space for pc-relative relocs that have become local due to symbol
13023 visibility changes. */
13024
13025 if (info->shared || htab->root.is_relocatable_executable)
13026 {
13027 /* The only relocs that use pc_count are R_ARM_REL32 and
13028 R_ARM_REL32_NOI, which will appear on something like
13029 ".long foo - .". We want calls to protected symbols to resolve
13030 directly to the function rather than going via the plt. If people
13031 want function pointer comparisons to work as expected then they
13032 should avoid writing assembly like ".long foo - .". */
13033 if (SYMBOL_CALLS_LOCAL (info, h))
13034 {
13035 struct elf_dyn_relocs **pp;
13036
13037 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13038 {
13039 p->count -= p->pc_count;
13040 p->pc_count = 0;
13041 if (p->count == 0)
13042 *pp = p->next;
13043 else
13044 pp = &p->next;
13045 }
13046 }
13047
13048 if (htab->vxworks_p)
13049 {
13050 struct elf_dyn_relocs **pp;
13051
13052 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13053 {
13054 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13055 *pp = p->next;
13056 else
13057 pp = &p->next;
13058 }
13059 }
13060
13061 /* Also discard relocs on undefined weak syms with non-default
13062 visibility. */
13063 if (eh->dyn_relocs != NULL
13064 && h->root.type == bfd_link_hash_undefweak)
13065 {
13066 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13067 eh->dyn_relocs = NULL;
13068
13069 /* Make sure undefined weak symbols are output as dynamic
13070 symbols in PIEs. */
13071 else if (h->dynindx == -1
13072 && !h->forced_local)
13073 {
13074 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13075 return FALSE;
13076 }
13077 }
13078
13079 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13080 && h->root.type == bfd_link_hash_new)
13081 {
13082 /* Output absolute symbols so that we can create relocations
13083 against them. For normal symbols we output a relocation
13084 against the section that contains them. */
13085 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13086 return FALSE;
13087 }
13088
13089 }
13090 else
13091 {
13092 /* For the non-shared case, discard space for relocs against
13093 symbols which turn out to need copy relocs or are not
13094 dynamic. */
13095
13096 if (!h->non_got_ref
13097 && ((h->def_dynamic
13098 && !h->def_regular)
13099 || (htab->root.dynamic_sections_created
13100 && (h->root.type == bfd_link_hash_undefweak
13101 || h->root.type == bfd_link_hash_undefined))))
13102 {
13103 /* Make sure this symbol is output as a dynamic symbol.
13104 Undefined weak syms won't yet be marked as dynamic. */
13105 if (h->dynindx == -1
13106 && !h->forced_local)
13107 {
13108 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13109 return FALSE;
13110 }
13111
13112 /* If that succeeded, we know we'll be keeping all the
13113 relocs. */
13114 if (h->dynindx != -1)
13115 goto keep;
13116 }
13117
13118 eh->dyn_relocs = NULL;
13119
13120 keep: ;
13121 }
13122
13123 /* Finally, allocate space. */
13124 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13125 {
13126 asection *sreloc = elf_section_data (p->sec)->sreloc;
13127 if (h->type == STT_GNU_IFUNC
13128 && eh->plt.noncall_refcount == 0
13129 && SYMBOL_REFERENCES_LOCAL (info, h))
13130 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13131 else
13132 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13133 }
13134
13135 return TRUE;
13136 }
13137
13138 /* Find any dynamic relocs that apply to read-only sections. */
13139
13140 static bfd_boolean
13141 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13142 {
13143 struct elf32_arm_link_hash_entry * eh;
13144 struct elf_dyn_relocs * p;
13145
13146 if (h->root.type == bfd_link_hash_warning)
13147 h = (struct elf_link_hash_entry *) h->root.u.i.link;
13148
13149 eh = (struct elf32_arm_link_hash_entry *) h;
13150 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13151 {
13152 asection *s = p->sec;
13153
13154 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13155 {
13156 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13157
13158 info->flags |= DF_TEXTREL;
13159
13160 /* Not an error, just cut short the traversal. */
13161 return FALSE;
13162 }
13163 }
13164 return TRUE;
13165 }
13166
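/* Record BYTESWAP_CODE in the link hash table; it is later used to set
   the EF_ARM_BE8 flag on the output (see elf32_arm_post_process_headers).  */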
13167 void
13168 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13169 int byteswap_code)
13170 {
13171 struct elf32_arm_link_hash_table *globals;
13172
13173 globals = elf32_arm_hash_table (info);
13174 if (globals == NULL)
13175 return;
13176
13177 globals->byteswap_code = byteswap_code;
13178 }
13179
13180 /* Set the sizes of the dynamic sections. */
13181
13182 static bfd_boolean
13183 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13184 struct bfd_link_info * info)
13185 {
13186 bfd * dynobj;
13187 asection * s;
13188 bfd_boolean plt;
13189 bfd_boolean relocs;
13190 bfd *ibfd;
13191 struct elf32_arm_link_hash_table *htab;
13192
13193 htab = elf32_arm_hash_table (info);
13194 if (htab == NULL)
13195 return FALSE;
13196
13197 dynobj = elf_hash_table (info)->dynobj;
13198 BFD_ASSERT (dynobj != NULL);
13199 check_use_blx (htab);
13200
13201 if (elf_hash_table (info)->dynamic_sections_created)
13202 {
13203 /* Set the contents of the .interp section to the interpreter. */
13204 if (info->executable)
13205 {
13206 s = bfd_get_section_by_name (dynobj, ".interp");
13207 BFD_ASSERT (s != NULL);
13208 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13209 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13210 }
13211 }
13212
13213 /* Set up .got offsets for local syms, and space for local dynamic
13214 relocs. */
13215 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13216 {
13217 bfd_signed_vma *local_got;
13218 bfd_signed_vma *end_local_got;
13219 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13220 char *local_tls_type;
13221 bfd_vma *local_tlsdesc_gotent;
13222 bfd_size_type locsymcount;
13223 Elf_Internal_Shdr *symtab_hdr;
13224 asection *srel;
13225 bfd_boolean is_vxworks = htab->vxworks_p;
13226 unsigned int symndx;
13227
13228 if (! is_arm_elf (ibfd))
13229 continue;
13230
13231 for (s = ibfd->sections; s != NULL; s = s->next)
13232 {
13233 struct elf_dyn_relocs *p;
13234
13235 for (p = (struct elf_dyn_relocs *)
13236 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13237 {
13238 if (!bfd_is_abs_section (p->sec)
13239 && bfd_is_abs_section (p->sec->output_section))
13240 {
13241 /* Input section has been discarded, either because
13242 it is a copy of a linkonce section or due to
13243 linker script /DISCARD/, so we'll be discarding
13244 the relocs too. */
13245 }
13246 else if (is_vxworks
13247 && strcmp (p->sec->output_section->name,
13248 ".tls_vars") == 0)
13249 {
13250 /* Relocations in VxWorks .tls_vars sections are
13251 handled specially by the loader. */
13252 }
13253 else if (p->count != 0)
13254 {
13255 srel = elf_section_data (p->sec)->sreloc;
13256 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13257 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13258 info->flags |= DF_TEXTREL;
13259 }
13260 }
13261 }
13262
13263 local_got = elf_local_got_refcounts (ibfd);
13264 if (!local_got)
13265 continue;
13266
13267 symtab_hdr = & elf_symtab_hdr (ibfd);
13268 locsymcount = symtab_hdr->sh_info;
13269 end_local_got = local_got + locsymcount;
13270 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13271 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13272 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13273 symndx = 0;
13274 s = htab->root.sgot;
13275 srel = htab->root.srelgot;
13276 for (; local_got < end_local_got;
13277 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13278 ++local_tlsdesc_gotent, ++symndx)
13279 {
13280 *local_tlsdesc_gotent = (bfd_vma) -1;
13281 local_iplt = *local_iplt_ptr;
13282 if (local_iplt != NULL)
13283 {
13284 struct elf_dyn_relocs *p;
13285
13286 if (local_iplt->root.refcount > 0)
13287 {
13288 elf32_arm_allocate_plt_entry (info, TRUE,
13289 &local_iplt->root,
13290 &local_iplt->arm);
13291 if (local_iplt->arm.noncall_refcount == 0)
13292 /* All references to the PLT are calls, so all
13293 non-call references can resolve directly to the
13294 run-time target. This means that the .got entry
13295 would be the same as the .igot.plt entry, so there's
13296 no point creating both. */
13297 *local_got = 0;
13298 }
13299 else
13300 {
13301 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13302 local_iplt->root.offset = (bfd_vma) -1;
13303 }
13304
13305 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13306 {
13307 asection *psrel;
13308
13309 psrel = elf_section_data (p->sec)->sreloc;
13310 if (local_iplt->arm.noncall_refcount == 0)
13311 elf32_arm_allocate_irelocs (info, psrel, p->count);
13312 else
13313 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13314 }
13315 }
13316 if (*local_got > 0)
13317 {
13318 Elf_Internal_Sym *isym;
13319
13320 *local_got = s->size;
13321 if (*local_tls_type & GOT_TLS_GD)
13322 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13323 s->size += 8;
13324 if (*local_tls_type & GOT_TLS_GDESC)
13325 {
13326 *local_tlsdesc_gotent = htab->root.sgotplt->size
13327 - elf32_arm_compute_jump_table_size (htab);
13328 htab->root.sgotplt->size += 8;
13329 *local_got = (bfd_vma) -2;
13330 /* plt.got_offset needs to know there's a TLS_DESC
13331 reloc in the middle of .got.plt. */
13332 htab->num_tls_desc++;
13333 }
13334 if (*local_tls_type & GOT_TLS_IE)
13335 s->size += 4;
13336
13337 if (*local_tls_type & GOT_NORMAL)
13338 {
13339 /* If the symbol is both GD and GDESC, *local_got
13340 may have been overwritten. */
13341 *local_got = s->size;
13342 s->size += 4;
13343 }
13344
13345 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13346 if (isym == NULL)
13347 return FALSE;
13348
13349 /* If all references to an STT_GNU_IFUNC PLT are calls,
13350 then all non-call references, including this GOT entry,
13351 resolve directly to the run-time target. */
13352 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13353 && (local_iplt == NULL
13354 || local_iplt->arm.noncall_refcount == 0))
13355 elf32_arm_allocate_irelocs (info, srel, 1);
13356 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13357 || *local_tls_type & GOT_TLS_GD)
13358 elf32_arm_allocate_dynrelocs (info, srel, 1);
13359
13360 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13361 {
13362 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13363 htab->tls_trampoline = -1;
13364 }
13365 }
13366 else
13367 *local_got = (bfd_vma) -1;
13368 }
13369 }
13370
13371 if (htab->tls_ldm_got.refcount > 0)
13372 {
13373 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13374 for R_ARM_TLS_LDM32 relocations. */
13375 htab->tls_ldm_got.offset = htab->root.sgot->size;
13376 htab->root.sgot->size += 8;
13377 if (info->shared)
13378 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13379 }
13380 else
13381 htab->tls_ldm_got.offset = -1;
13382
13383 /* Allocate global sym .plt and .got entries, and space for global
13384 sym dynamic relocs. */
13385 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13386
13387 /* Here we rummage through the found bfds to collect glue information. */
13388 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13389 {
13390 if (! is_arm_elf (ibfd))
13391 continue;
13392
13393 /* Initialise mapping tables for code/data. */
13394 bfd_elf32_arm_init_maps (ibfd);
13395
13396 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13397 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13398 /* xgettext:c-format */
13399 _bfd_error_handler (_("Errors encountered processing file %s"),
13400 ibfd->filename);
13401 }
13402
13403 /* Allocate space for the glue sections now that we've sized them. */
13404 bfd_elf32_arm_allocate_interworking_sections (info);
13405
13406 /* For every jump slot reserved in the sgotplt, reloc_count is
13407 incremented. However, when we reserve space for TLS descriptors,
13408 it's not incremented, so in order to compute the space reserved
13409 for them, it suffices to multiply the reloc count by the jump
13410 slot size. */
13411 if (htab->root.srelplt)
13412 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
13413
13414 if (htab->tls_trampoline)
13415 {
13416 if (htab->root.splt->size == 0)
13417 htab->root.splt->size += htab->plt_header_size;
13418
13419 htab->tls_trampoline = htab->root.splt->size;
13420 htab->root.splt->size += htab->plt_entry_size;
13421
13422 /* If we're not using lazy TLS relocations, don't generate the
13423 PLT and GOT entries they require. */
13424 if (!(info->flags & DF_BIND_NOW))
13425 {
13426 htab->dt_tlsdesc_got = htab->root.sgot->size;
13427 htab->root.sgot->size += 4;
13428
13429 htab->dt_tlsdesc_plt = htab->root.splt->size;
13430 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13431 }
13432 }
13433
13434 /* The check_relocs and adjust_dynamic_symbol entry points have
13435 determined the sizes of the various dynamic sections. Allocate
13436 memory for them. */
13437 plt = FALSE;
13438 relocs = FALSE;
13439 for (s = dynobj->sections; s != NULL; s = s->next)
13440 {
13441 const char * name;
13442
13443 if ((s->flags & SEC_LINKER_CREATED) == 0)
13444 continue;
13445
13446 /* It's OK to base decisions on the section name, because none
13447 of the dynobj section names depend upon the input files. */
13448 name = bfd_get_section_name (dynobj, s);
13449
13450 if (s == htab->root.splt)
13451 {
13452 /* Remember whether there is a PLT. */
13453 plt = s->size != 0;
13454 }
13455 else if (CONST_STRNEQ (name, ".rel"))
13456 {
13457 if (s->size != 0)
13458 {
13459 /* Remember whether there are any reloc sections other
13460 than .rel(a).plt and .rela.plt.unloaded. */
13461 if (s != htab->root.srelplt && s != htab->srelplt2)
13462 relocs = TRUE;
13463
13464 /* We use the reloc_count field as a counter if we need
13465 to copy relocs into the output file. */
13466 s->reloc_count = 0;
13467 }
13468 }
13469 else if (s != htab->root.sgot
13470 && s != htab->root.sgotplt
13471 && s != htab->root.iplt
13472 && s != htab->root.igotplt
13473 && s != htab->sdynbss)
13474 {
13475 /* It's not one of our sections, so don't allocate space. */
13476 continue;
13477 }
13478
13479 if (s->size == 0)
13480 {
13481 /* If we don't need this section, strip it from the
13482 output file. This is mostly to handle .rel(a).bss and
13483 .rel(a).plt. We must create both sections in
13484 create_dynamic_sections, because they must be created
13485 before the linker maps input sections to output
13486 sections. The linker does that before
13487 adjust_dynamic_symbol is called, and it is that
13488 function which decides whether anything needs to go
13489 into these sections. */
13490 s->flags |= SEC_EXCLUDE;
13491 continue;
13492 }
13493
13494 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13495 continue;
13496
13497 /* Allocate memory for the section contents. */
13498 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13499 if (s->contents == NULL)
13500 return FALSE;
13501 }
13502
13503 if (elf_hash_table (info)->dynamic_sections_created)
13504 {
13505 /* Add some entries to the .dynamic section. We fill in the
13506 values later, in elf32_arm_finish_dynamic_sections, but we
13507 must add the entries now so that we get the correct size for
13508 the .dynamic section. The DT_DEBUG entry is filled in by the
13509 dynamic linker and used by the debugger. */
13510 #define add_dynamic_entry(TAG, VAL) \
13511 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
13512
13513 if (info->executable)
13514 {
13515 if (!add_dynamic_entry (DT_DEBUG, 0))
13516 return FALSE;
13517 }
13518
13519 if (plt)
13520 {
13521 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13522 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13523 || !add_dynamic_entry (DT_PLTREL,
13524 htab->use_rel ? DT_REL : DT_RELA)
13525 || !add_dynamic_entry (DT_JMPREL, 0))
13526 return FALSE;
13527
13528 if (htab->dt_tlsdesc_plt
13529 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
13530 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
13531 return FALSE;
13532 }
13533
13534 if (relocs)
13535 {
13536 if (htab->use_rel)
13537 {
13538 if (!add_dynamic_entry (DT_REL, 0)
13539 || !add_dynamic_entry (DT_RELSZ, 0)
13540 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13541 return FALSE;
13542 }
13543 else
13544 {
13545 if (!add_dynamic_entry (DT_RELA, 0)
13546 || !add_dynamic_entry (DT_RELASZ, 0)
13547 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13548 return FALSE;
13549 }
13550 }
13551
13552 /* If any dynamic relocs apply to a read-only section,
13553 then we need a DT_TEXTREL entry. */
13554 if ((info->flags & DF_TEXTREL) == 0)
13555 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13556 info);
13557
13558 if ((info->flags & DF_TEXTREL) != 0)
13559 {
13560 if (!add_dynamic_entry (DT_TEXTREL, 0))
13561 return FALSE;
13562 }
13563 if (htab->vxworks_p
13564 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13565 return FALSE;
13566 }
13567 #undef add_dynamic_entry
13568
13569 return TRUE;
13570 }
13571
13572 /* Size sections even though they're not dynamic. We use this hook to
13573 set up _TLS_MODULE_BASE_, if needed. */
13574
13575 static bfd_boolean
13576 elf32_arm_always_size_sections (bfd *output_bfd,
13577 struct bfd_link_info *info)
13578 {
13579 asection *tls_sec;
13580
13581 if (info->relocatable)
13582 return TRUE;
13583
13584 tls_sec = elf_hash_table (info)->tls_sec;
13585
13586 if (tls_sec)
13587 {
13588 struct elf_link_hash_entry *tlsbase;
13589
13590 tlsbase = elf_link_hash_lookup
13591 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13592
13593 if (tlsbase)
13594 {
13595 struct bfd_link_hash_entry *bh = NULL;
13596 const struct elf_backend_data *bed
13597 = get_elf_backend_data (output_bfd);
13598
13599 if (!(_bfd_generic_link_add_one_symbol
13600 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13601 tls_sec, 0, NULL, FALSE,
13602 bed->collect, &bh)))
13603 return FALSE;
13604
13605 tlsbase->type = STT_TLS;
13606 tlsbase = (struct elf_link_hash_entry *)bh;
13607 tlsbase->def_regular = 1;
13608 tlsbase->other = STV_HIDDEN;
13609 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13610 }
13611 }
13612 return TRUE;
13613 }
13614
13615 /* Finish up dynamic symbol handling. We set the contents of various
13616 dynamic sections here. */
13617
13618 static bfd_boolean
13619 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13620 struct bfd_link_info * info,
13621 struct elf_link_hash_entry * h,
13622 Elf_Internal_Sym * sym)
13623 {
13624 struct elf32_arm_link_hash_table *htab;
13625 struct elf32_arm_link_hash_entry *eh;
13626
13627 htab = elf32_arm_hash_table (info);
13628 if (htab == NULL)
13629 return FALSE;
13630
13631 eh = (struct elf32_arm_link_hash_entry *) h;
13632
13633 if (h->plt.offset != (bfd_vma) -1)
13634 {
13635 if (!eh->is_iplt)
13636 {
13637 BFD_ASSERT (h->dynindx != -1);
13638 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13639 h->dynindx, 0);
13640 }
13641
13642 if (!h->def_regular)
13643 {
13644 /* Mark the symbol as undefined, rather than as defined in
13645 the .plt section. Leave the value alone. */
13646 sym->st_shndx = SHN_UNDEF;
13647 /* If the symbol is weak, we do need to clear the value.
13648 Otherwise, the PLT entry would provide a definition for
13649 the symbol even if the symbol wasn't defined anywhere,
13650 and so the symbol would never be NULL. */
13651 if (!h->ref_regular_nonweak)
13652 sym->st_value = 0;
13653 }
13654 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13655 {
13656 /* At least one non-call relocation references this .iplt entry,
13657 so the .iplt entry is the function's canonical address. */
13658 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13659 sym->st_target_internal = ST_BRANCH_TO_ARM;
13660 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13661 (output_bfd, htab->root.iplt->output_section));
13662 sym->st_value = (h->plt.offset
13663 + htab->root.iplt->output_section->vma
13664 + htab->root.iplt->output_offset);
13665 }
13666 }
13667
13668 if (h->needs_copy)
13669 {
13670 asection * s;
13671 Elf_Internal_Rela rel;
13672
13673 /* This symbol needs a copy reloc. Set it up. */
13674 BFD_ASSERT (h->dynindx != -1
13675 && (h->root.type == bfd_link_hash_defined
13676 || h->root.type == bfd_link_hash_defweak));
13677
13678 s = htab->srelbss;
13679 BFD_ASSERT (s != NULL);
13680
13681 rel.r_addend = 0;
13682 rel.r_offset = (h->root.u.def.value
13683 + h->root.u.def.section->output_section->vma
13684 + h->root.u.def.section->output_offset);
13685 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
13686 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
13687 }
13688
13689 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
13690 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
13691 to the ".got" section. */
13692 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
13693 || (!htab->vxworks_p && h == htab->root.hgot))
13694 sym->st_shndx = SHN_ABS;
13695
13696 return TRUE;
13697 }
13698
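/* Write the COUNT instructions in TEMPLATE to CONTENTS, replacing
   "bx rX" with "mov pc, rX" if BX is not permitted (fix_v4bx).  */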
13699 static void
13700 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
13701 void *contents,
13702 const unsigned long *template, unsigned count)
13703 {
13704 unsigned ix;
13705
13706 for (ix = 0; ix != count; ix++)
13707 {
13708 unsigned long insn = template[ix];
13709
13710 /* Emit mov pc,rx if bx is not permitted. */
13711 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
13712 insn = (insn & 0xf000000f) | 0x01a0f000;
13713 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
13714 }
13715 }
13716
13717 /* Finish up the dynamic sections. */
13718
13719 static bfd_boolean
13720 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
13721 {
13722 bfd * dynobj;
13723 asection * sgot;
13724 asection * sdyn;
13725 struct elf32_arm_link_hash_table *htab;
13726
13727 htab = elf32_arm_hash_table (info);
13728 if (htab == NULL)
13729 return FALSE;
13730
13731 dynobj = elf_hash_table (info)->dynobj;
13732
13733 sgot = htab->root.sgotplt;
13734 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
13735
13736 if (elf_hash_table (info)->dynamic_sections_created)
13737 {
13738 asection *splt;
13739 Elf32_External_Dyn *dyncon, *dynconend;
13740
13741 splt = htab->root.splt;
13742 BFD_ASSERT (splt != NULL && sdyn != NULL);
13743 BFD_ASSERT (htab->symbian_p || sgot != NULL);
13744
13745 dyncon = (Elf32_External_Dyn *) sdyn->contents;
13746 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
13747
13748 for (; dyncon < dynconend; dyncon++)
13749 {
13750 Elf_Internal_Dyn dyn;
13751 const char * name;
13752 asection * s;
13753
13754 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
13755
13756 switch (dyn.d_tag)
13757 {
13758 unsigned int type;
13759
13760 default:
13761 if (htab->vxworks_p
13762 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
13763 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13764 break;
13765
13766 case DT_HASH:
13767 name = ".hash";
13768 goto get_vma_if_bpabi;
13769 case DT_STRTAB:
13770 name = ".dynstr";
13771 goto get_vma_if_bpabi;
13772 case DT_SYMTAB:
13773 name = ".dynsym";
13774 goto get_vma_if_bpabi;
13775 case DT_VERSYM:
13776 name = ".gnu.version";
13777 goto get_vma_if_bpabi;
13778 case DT_VERDEF:
13779 name = ".gnu.version_d";
13780 goto get_vma_if_bpabi;
13781 case DT_VERNEED:
13782 name = ".gnu.version_r";
13783 goto get_vma_if_bpabi;
13784
13785 case DT_PLTGOT:
13786 name = ".got";
13787 goto get_vma;
13788 case DT_JMPREL:
13789 name = RELOC_SECTION (htab, ".plt");
13790 get_vma:
13791 s = bfd_get_section_by_name (output_bfd, name);
13792 BFD_ASSERT (s != NULL);
13793 if (!htab->symbian_p)
13794 dyn.d_un.d_ptr = s->vma;
13795 else
13796 /* In the BPABI, tags in the PT_DYNAMIC section point
13797 at the file offset, not the memory address, for the
13798 convenience of the post linker. */
13799 dyn.d_un.d_ptr = s->filepos;
13800 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13801 break;
13802
13803 get_vma_if_bpabi:
13804 if (htab->symbian_p)
13805 goto get_vma;
13806 break;
13807
13808 case DT_PLTRELSZ:
13809 s = htab->root.srelplt;
13810 BFD_ASSERT (s != NULL);
13811 dyn.d_un.d_val = s->size;
13812 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13813 break;
13814
13815 case DT_RELSZ:
13816 case DT_RELASZ:
13817 if (!htab->symbian_p)
13818 {
13819 /* My reading of the SVR4 ABI indicates that the
13820 procedure linkage table relocs (DT_JMPREL) should be
13821 included in the overall relocs (DT_REL). This is
13822 what Solaris does. However, UnixWare can not handle
13823 that case. Therefore, we override the DT_RELSZ entry
13824 here to make it not include the JMPREL relocs. Since
13825 the linker script arranges for .rel(a).plt to follow all
13826 other relocation sections, we don't have to worry
13827 about changing the DT_REL entry. */
13828 s = htab->root.srelplt;
13829 if (s != NULL)
13830 dyn.d_un.d_val -= s->size;
13831 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13832 break;
13833 }
13834 /* Fall through. */
13835
13836 case DT_REL:
13837 case DT_RELA:
13838 /* In the BPABI, the DT_REL tag must point at the file
13839 offset, not the VMA, of the first relocation
13840 section. So, we use code similar to that in
13841 elflink.c, but do not check for SHF_ALLOC on the
13842 relocation section, since relocation sections are
13843 never allocated under the BPABI. The comments above
13844 about UnixWare notwithstanding, we include all of the
13845 relocations here. */
13846 if (htab->symbian_p)
13847 {
13848 unsigned int i;
13849 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
13850 ? SHT_REL : SHT_RELA);
13851 dyn.d_un.d_val = 0;
13852 for (i = 1; i < elf_numsections (output_bfd); i++)
13853 {
13854 Elf_Internal_Shdr *hdr
13855 = elf_elfsections (output_bfd)[i];
13856 if (hdr->sh_type == type)
13857 {
13858 if (dyn.d_tag == DT_RELSZ
13859 || dyn.d_tag == DT_RELASZ)
13860 dyn.d_un.d_val += hdr->sh_size;
13861 else if ((ufile_ptr) hdr->sh_offset
13862 <= dyn.d_un.d_val - 1)
13863 dyn.d_un.d_val = hdr->sh_offset;
13864 }
13865 }
13866 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13867 }
13868 break;
13869
13870 case DT_TLSDESC_PLT:
13871 s = htab->root.splt;
13872 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
13873 + htab->dt_tlsdesc_plt);
13874 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13875 break;
13876
13877 case DT_TLSDESC_GOT:
13878 s = htab->root.sgot;
13879 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
13880 + htab->dt_tlsdesc_got);
13881 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13882 break;
13883
13884 /* Set the bottom bit of DT_INIT/FINI if the
13885 corresponding function is Thumb. */
13886 case DT_INIT:
13887 name = info->init_function;
13888 goto get_sym;
13889 case DT_FINI:
13890 name = info->fini_function;
13891 get_sym:
13892 /* If it wasn't set by elf_bfd_final_link
13893 then there is nothing to adjust. */
13894 if (dyn.d_un.d_val != 0)
13895 {
13896 struct elf_link_hash_entry * eh;
13897
13898 eh = elf_link_hash_lookup (elf_hash_table (info), name,
13899 FALSE, FALSE, TRUE);
13900 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
13901 {
13902 dyn.d_un.d_val |= 1;
13903 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
13904 }
13905 }
13906 break;
13907 }
13908 }
13909
13910 /* Fill in the first entry in the procedure linkage table. */
13911 if (splt->size > 0 && htab->plt_header_size)
13912 {
13913 const bfd_vma *plt0_entry;
13914 bfd_vma got_address, plt_address, got_displacement;
13915
13916 /* Calculate the addresses of the GOT and PLT. */
13917 got_address = sgot->output_section->vma + sgot->output_offset;
13918 plt_address = splt->output_section->vma + splt->output_offset;
13919
13920 if (htab->vxworks_p)
13921 {
13922 /* The VxWorks GOT is relocated by the dynamic linker.
13923 Therefore, we must emit relocations rather than simply
13924 computing the values now. */
13925 Elf_Internal_Rela rel;
13926
13927 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
13928 put_arm_insn (htab, output_bfd, plt0_entry[0],
13929 splt->contents + 0);
13930 put_arm_insn (htab, output_bfd, plt0_entry[1],
13931 splt->contents + 4);
13932 put_arm_insn (htab, output_bfd, plt0_entry[2],
13933 splt->contents + 8);
13934 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
13935
13936 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
13937 rel.r_offset = plt_address + 12;
13938 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
13939 rel.r_addend = 0;
13940 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
13941 htab->srelplt2->contents);
13942 }
13943 else
13944 {
13945 got_displacement = got_address - (plt_address + 16);
13946
13947 plt0_entry = elf32_arm_plt0_entry;
13948 put_arm_insn (htab, output_bfd, plt0_entry[0],
13949 splt->contents + 0);
13950 put_arm_insn (htab, output_bfd, plt0_entry[1],
13951 splt->contents + 4);
13952 put_arm_insn (htab, output_bfd, plt0_entry[2],
13953 splt->contents + 8);
13954 put_arm_insn (htab, output_bfd, plt0_entry[3],
13955 splt->contents + 12);
13956
13957 #ifdef FOUR_WORD_PLT
13958 /* The displacement value goes in the otherwise-unused
13959 last word of the second entry. */
13960 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
13961 #else
13962 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
13963 #endif
13964 }
13965 }
13966
13967 /* UnixWare sets the entsize of .plt to 4, although that doesn't
13968 really seem like the right value. */
13969 if (splt->output_section->owner == output_bfd)
13970 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
13971
13972 if (htab->dt_tlsdesc_plt)
13973 {
13974 bfd_vma got_address
13975 = sgot->output_section->vma + sgot->output_offset;
13976 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
13977 + htab->root.sgot->output_offset);
13978 bfd_vma plt_address
13979 = splt->output_section->vma + splt->output_offset;
13980
13981 arm_put_trampoline (htab, output_bfd,
13982 splt->contents + htab->dt_tlsdesc_plt,
13983 dl_tlsdesc_lazy_trampoline, 6);
13984
13985 bfd_put_32 (output_bfd,
13986 gotplt_address + htab->dt_tlsdesc_got
13987 - (plt_address + htab->dt_tlsdesc_plt)
13988 - dl_tlsdesc_lazy_trampoline[6],
13989 splt->contents + htab->dt_tlsdesc_plt + 24);
13990 bfd_put_32 (output_bfd,
13991 got_address - (plt_address + htab->dt_tlsdesc_plt)
13992 - dl_tlsdesc_lazy_trampoline[7],
13993 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
13994 }
13995
13996 if (htab->tls_trampoline)
13997 {
13998 arm_put_trampoline (htab, output_bfd,
13999 splt->contents + htab->tls_trampoline,
14000 tls_trampoline, 3);
14001 #ifdef FOUR_WORD_PLT
14002 bfd_put_32 (output_bfd, 0x00000000,
14003 splt->contents + htab->tls_trampoline + 12);
14004 #endif
14005 }
14006
14007 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14008 {
14009 /* Correct the .rel(a).plt.unloaded relocations. They will have
14010 incorrect symbol indexes. */
14011 int num_plts;
14012 unsigned char *p;
14013
14014 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14015 / htab->plt_entry_size);
14016 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14017
14018 for (; num_plts; num_plts--)
14019 {
14020 Elf_Internal_Rela rel;
14021
14022 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14023 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14024 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14025 p += RELOC_SIZE (htab);
14026
14027 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14028 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14029 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14030 p += RELOC_SIZE (htab);
14031 }
14032 }
14033 }
14034
14035 /* Fill in the first three entries in the global offset table. */
14036 if (sgot)
14037 {
14038 if (sgot->size > 0)
14039 {
14040 if (sdyn == NULL)
14041 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14042 else
14043 bfd_put_32 (output_bfd,
14044 sdyn->output_section->vma + sdyn->output_offset,
14045 sgot->contents);
14046 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14047 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14048 }
14049
14050 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14051 }
14052
14053 return TRUE;
14054 }
14055
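/* Fill in ARM-specific fields of the ELF header: choose the OSABI value
   based on the EABI version in use, set the ABI version, and set the BE8
   flag if code byte-swapping was requested.  */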
14056 static void
14057 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14058 {
14059 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14060 struct elf32_arm_link_hash_table *globals;
14061
14062 i_ehdrp = elf_elfheader (abfd);
14063
14064 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14065 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14066 else
14067 i_ehdrp->e_ident[EI_OSABI] = 0;
14068 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14069
14070 if (link_info)
14071 {
14072 globals = elf32_arm_hash_table (link_info);
14073 if (globals != NULL && globals->byteswap_code)
14074 i_ehdrp->e_flags |= EF_ARM_BE8;
14075 }
14076 }
14077
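/* Return the class of a dynamic relocation, used by the generic ELF
   code when sorting dynamic relocations.  */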
14078 static enum elf_reloc_type_class
14079 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14080 {
14081 switch ((int) ELF32_R_TYPE (rela->r_info))
14082 {
14083 case R_ARM_RELATIVE:
14084 return reloc_class_relative;
14085 case R_ARM_JUMP_SLOT:
14086 return reloc_class_plt;
14087 case R_ARM_COPY:
14088 return reloc_class_copy;
14089 default:
14090 return reloc_class_normal;
14091 }
14092 }
14093
14094 /* Set BFD section flags from the ELF section header: SHT_NOTE sections are link-once and duplicates must have identical contents. */
14095
14096 static bfd_boolean
14097 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
14098 {
14099 if (hdr->sh_type == SHT_NOTE)
14100 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
14101
14102 return TRUE;
14103 }
14104
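/* Final processing before the output file is written: update the ARM
   architecture note section, if any.  */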
14105 static void
14106 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14107 {
14108 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14109 }
14110
14111 /* Return TRUE if NAME is the name of an unwind table section. */
14112
14113 static bfd_boolean
14114 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14115 {
14116 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14117 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14118 }
14119
14120
14121 /* Set the type and flags for an ARM section. We do this based on
14122 the section name, which is a hack, but ought to work. */
14123
14124 static bfd_boolean
14125 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14126 {
14127 const char * name;
14128
14129 name = bfd_get_section_name (abfd, sec);
14130
14131 if (is_arm_elf_unwind_section_name (abfd, name))
14132 {
14133 hdr->sh_type = SHT_ARM_EXIDX;
14134 hdr->sh_flags |= SHF_LINK_ORDER;
14135 }
14136 return TRUE;
14137 }
14138
14139 /* Handle an ARM specific section when reading an object file. This is
14140 called when bfd_section_from_shdr finds a section with an unknown
14141 type. */
14142
14143 static bfd_boolean
14144 elf32_arm_section_from_shdr (bfd *abfd,
14145 Elf_Internal_Shdr * hdr,
14146 const char *name,
14147 int shindex)
14148 {
14149 /* There ought to be a place to keep ELF backend specific flags, but
14150 at the moment there isn't one. We just keep track of the
14151 sections by their name, instead. Fortunately, the ABI gives
14152 names for all the ARM specific sections, so we will probably get
14153 away with this. */
14154 switch (hdr->sh_type)
14155 {
14156 case SHT_ARM_EXIDX:
14157 case SHT_ARM_PREEMPTMAP:
14158 case SHT_ARM_ATTRIBUTES:
14159 break;
14160
14161 default:
14162 return FALSE;
14163 }
14164
14165 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14166 return FALSE;
14167
14168 return TRUE;
14169 }
14170
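/* Return the ARM-specific section data for SEC, or NULL if SEC does not
   belong to an ARM ELF bfd.  */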
14171 static _arm_elf_section_data *
14172 get_arm_elf_section_data (asection * sec)
14173 {
14174 if (sec && sec->owner && is_arm_elf (sec->owner))
14175 return elf32_arm_section_data (sec);
14176 else
14177 return NULL;
14178 }
14179
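/* Context passed to the routines below that output mapping and stub
   symbols.  */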
14180 typedef struct
14181 {
14182 void *finfo;
14183 struct bfd_link_info *info;
14184 asection *sec;
14185 int sec_shndx;
14186 int (*func) (void *, const char *, Elf_Internal_Sym *,
14187 asection *, struct elf_link_hash_entry *);
14188 } output_arch_syminfo;
14189
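/* The three kinds of mapping symbol: $a (ARM code), $t (Thumb code)
   and $d (data).  */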
14190 enum map_symbol_type
14191 {
14192 ARM_MAP_ARM,
14193 ARM_MAP_THUMB,
14194 ARM_MAP_DATA
14195 };
14196
14197
14198 /* Output a single mapping symbol. */
14199
14200 static bfd_boolean
14201 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14202 enum map_symbol_type type,
14203 bfd_vma offset)
14204 {
14205 static const char *names[3] = {"$a", "$t", "$d"};
14206 Elf_Internal_Sym sym;
14207
14208 sym.st_value = osi->sec->output_section->vma
14209 + osi->sec->output_offset
14210 + offset;
14211 sym.st_size = 0;
14212 sym.st_other = 0;
14213 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14214 sym.st_shndx = osi->sec_shndx;
14215 sym.st_target_internal = 0;
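/* Also record the mapping symbol type ('a', 't' or 'd') in the section's
   own mapping table.  */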
14216 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14217 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
14218 }
14219
14220 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14221 IS_IPLT_ENTRY_P says whether the entry is in .iplt rather than .plt. */
14222
14223 static bfd_boolean
14224 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14225 bfd_boolean is_iplt_entry_p,
14226 union gotplt_union *root_plt,
14227 struct arm_plt_info *arm_plt)
14228 {
14229 struct elf32_arm_link_hash_table *htab;
14230 bfd_vma addr, plt_header_size;
14231
14232 if (root_plt->offset == (bfd_vma) -1)
14233 return TRUE;
14234
14235 htab = elf32_arm_hash_table (osi->info);
14236 if (htab == NULL)
14237 return FALSE;
14238
14239 if (is_iplt_entry_p)
14240 {
14241 osi->sec = htab->root.iplt;
14242 plt_header_size = 0;
14243 }
14244 else
14245 {
14246 osi->sec = htab->root.splt;
14247 plt_header_size = htab->plt_header_size;
14248 }
14249 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14250 (osi->info->output_bfd, osi->sec->output_section));
14251
14252 addr = root_plt->offset & -2;
14253 if (htab->symbian_p)
14254 {
14255 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14256 return FALSE;
14257 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14258 return FALSE;
14259 }
14260 else if (htab->vxworks_p)
14261 {
14262 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14263 return FALSE;
14264 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14265 return FALSE;
14266 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14267 return FALSE;
14268 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14269 return FALSE;
14270 }
14271 else
14272 {
14273 bfd_boolean thumb_stub_p;
14274
14275 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14276 if (thumb_stub_p)
14277 {
14278 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14279 return FALSE;
14280 }
14281 #ifdef FOUR_WORD_PLT
14282 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14283 return FALSE;
14284 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14285 return FALSE;
14286 #else
14287 /* A three-word PLT with no Thumb thunk contains only Arm code,
14288 so we only need to output a mapping symbol for the first PLT entry
14289 and for entries with Thumb thunks. */
14290 if (thumb_stub_p || addr == plt_header_size)
14291 {
14292 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14293 return FALSE;
14294 }
14295 #endif
14296 }
14297
14298 return TRUE;
14299 }
14300
14301 /* Output mapping symbols for PLT entries associated with H. */
14302
14303 static bfd_boolean
14304 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14305 {
14306 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14307 struct elf32_arm_link_hash_entry *eh;
14308
14309 if (h->root.type == bfd_link_hash_indirect)
14310 return TRUE;
14311
14312 if (h->root.type == bfd_link_hash_warning)
14313 /* When warning symbols are created, they **replace** the "real"
14314 entry in the hash table, thus we never get to see the real
14315 symbol in a hash traversal. So look at it now. */
14316 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14317
14318 eh = (struct elf32_arm_link_hash_entry *) h;
14319 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14320 &h->plt, &eh->plt);
14321 }
14322
14323 /* Output a single local symbol for a generated stub. */
14324
14325 static bfd_boolean
14326 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14327 bfd_vma offset, bfd_vma size)
14328 {
14329 Elf_Internal_Sym sym;
14330
14331 sym.st_value = osi->sec->output_section->vma
14332 + osi->sec->output_offset
14333 + offset;
14334 sym.st_size = size;
14335 sym.st_other = 0;
14336 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14337 sym.st_shndx = osi->sec_shndx;
14338 sym.st_target_internal = 0;
14339 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
14340 }
14341
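/* Output a local symbol and mapping symbols for the long-branch stub
   described by GEN_ENTRY.  Called via bfd_hash_traverse; IN_ARG points
   to an output_arch_syminfo.  */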
14342 static bfd_boolean
14343 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14344 void * in_arg)
14345 {
14346 struct elf32_arm_stub_hash_entry *stub_entry;
14347 asection *stub_sec;
14348 bfd_vma addr;
14349 char *stub_name;
14350 output_arch_syminfo *osi;
14351 const insn_sequence *template_sequence;
14352 enum stub_insn_type prev_type;
14353 int size;
14354 int i;
14355 enum map_symbol_type sym_type;
14356
14357 /* Massage our args to the form they really have. */
14358 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14359 osi = (output_arch_syminfo *) in_arg;
14360
14361 stub_sec = stub_entry->stub_sec;
14362
14363 /* Ensure this stub is attached to the current section being
14364 processed. */
14365 if (stub_sec != osi->sec)
14366 return TRUE;
14367
14368 addr = (bfd_vma) stub_entry->stub_offset;
14369 stub_name = stub_entry->output_name;
14370
14371 template_sequence = stub_entry->stub_template;
14372 switch (template_sequence[0].type)
14373 {
14374 case ARM_TYPE:
14375 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14376 return FALSE;
14377 break;
14378 case THUMB16_TYPE:
14379 case THUMB32_TYPE:
14380 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14381 stub_entry->stub_size))
14382 return FALSE;
14383 break;
14384 default:
14385 BFD_FAIL ();
14386 return 0;
14387 }
14388
14389 prev_type = DATA_TYPE;
14390 size = 0;
14391 for (i = 0; i < stub_entry->stub_template_size; i++)
14392 {
14393 switch (template_sequence[i].type)
14394 {
14395 case ARM_TYPE:
14396 sym_type = ARM_MAP_ARM;
14397 break;
14398
14399 case THUMB16_TYPE:
14400 case THUMB32_TYPE:
14401 sym_type = ARM_MAP_THUMB;
14402 break;
14403
14404 case DATA_TYPE:
14405 sym_type = ARM_MAP_DATA;
14406 break;
14407
14408 default:
14409 BFD_FAIL ();
14410 return FALSE;
14411 }
14412
14413 if (template_sequence[i].type != prev_type)
14414 {
14415 prev_type = template_sequence[i].type;
14416 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14417 return FALSE;
14418 }
14419
14420 switch (template_sequence[i].type)
14421 {
14422 case ARM_TYPE:
14423 case THUMB32_TYPE:
14424 size += 4;
14425 break;
14426
14427 case THUMB16_TYPE:
14428 size += 2;
14429 break;
14430
14431 case DATA_TYPE:
14432 size += 4;
14433 break;
14434
14435 default:
14436 BFD_FAIL ();
14437 return FALSE;
14438 }
14439 }
14440
14441 return TRUE;
14442 }
14443
14444 /* Output mapping symbols for linker generated sections,
14445 and for those data-only sections that do not have a
14446 $d. */
14447
14448 static bfd_boolean
14449 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14450 struct bfd_link_info *info,
14451 void *finfo,
14452 int (*func) (void *, const char *,
14453 Elf_Internal_Sym *,
14454 asection *,
14455 struct elf_link_hash_entry *))
14456 {
14457 output_arch_syminfo osi;
14458 struct elf32_arm_link_hash_table *htab;
14459 bfd_vma offset;
14460 bfd_size_type size;
14461 bfd *input_bfd;
14462
14463 htab = elf32_arm_hash_table (info);
14464 if (htab == NULL)
14465 return FALSE;
14466
14467 check_use_blx (htab);
14468
14469 osi.finfo = finfo;
14470 osi.info = info;
14471 osi.func = func;
14472
14473 /* Add a $d mapping symbol to data-only sections that
14474 don't have any mapping symbol. This may result in (harmless) redundant
14475 mapping symbols. */
14476 for (input_bfd = info->input_bfds;
14477 input_bfd != NULL;
14478 input_bfd = input_bfd->link_next)
14479 {
14480 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14481 for (osi.sec = input_bfd->sections;
14482 osi.sec != NULL;
14483 osi.sec = osi.sec->next)
14484 {
14485 if (osi.sec->output_section != NULL
14486 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14487 != 0)
14488 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14489 == SEC_HAS_CONTENTS
14490 && get_arm_elf_section_data (osi.sec) != NULL
14491 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14492 && osi.sec->size > 0)
14493 {
14494 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14495 (output_bfd, osi.sec->output_section);
14496 if (osi.sec_shndx != (int)SHN_BAD)
14497 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14498 }
14499 }
14500 }
14501
14502 /* ARM->Thumb glue. */
14503 if (htab->arm_glue_size > 0)
14504 {
14505 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14506 ARM2THUMB_GLUE_SECTION_NAME);
14507
14508 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14509 (output_bfd, osi.sec->output_section);
14510 if (info->shared || htab->root.is_relocatable_executable
14511 || htab->pic_veneer)
14512 size = ARM2THUMB_PIC_GLUE_SIZE;
14513 else if (htab->use_blx)
14514 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14515 else
14516 size = ARM2THUMB_STATIC_GLUE_SIZE;
14517
14518 for (offset = 0; offset < htab->arm_glue_size; offset += size)
14519 {
14520 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14521 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14522 }
14523 }
14524
14525 /* Thumb->ARM glue. */
14526 if (htab->thumb_glue_size > 0)
14527 {
14528 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14529 THUMB2ARM_GLUE_SECTION_NAME);
14530
14531 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14532 (output_bfd, osi.sec->output_section);
14533 size = THUMB2ARM_GLUE_SIZE;
14534
14535 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
14536 {
14537 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14538 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14539 }
14540 }
14541
14542 /* ARMv4 BX veneers. */
14543 if (htab->bx_glue_size > 0)
14544 {
14545 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14546 ARM_BX_GLUE_SECTION_NAME);
14547
14548 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14549 (output_bfd, osi.sec->output_section);
14550
14551 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14552 }
14553
14554 /* Long call stubs. */
14555 if (htab->stub_bfd && htab->stub_bfd->sections)
14556 {
14557 asection* stub_sec;
14558
14559 for (stub_sec = htab->stub_bfd->sections;
14560 stub_sec != NULL;
14561 stub_sec = stub_sec->next)
14562 {
14563 /* Ignore non-stub sections. */
14564 if (!strstr (stub_sec->name, STUB_SUFFIX))
14565 continue;
14566
14567 osi.sec = stub_sec;
14568
14569 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14570 (output_bfd, osi.sec->output_section);
14571
14572 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14573 }
14574 }
14575
14576 /* Finally, output mapping symbols for the PLT. */
14577 if (htab->root.splt && htab->root.splt->size > 0)
14578 {
14579 osi.sec = htab->root.splt;
14580 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14581 (output_bfd, osi.sec->output_section));
14582
14583 /* Output mapping symbols for the plt header. SymbianOS does not have a
14584 plt header. */
14585 if (htab->vxworks_p)
14586 {
14587 /* VxWorks shared libraries have no PLT header. */
14588 if (!info->shared)
14589 {
14590 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14591 return FALSE;
14592 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14593 return FALSE;
14594 }
14595 }
14596 else if (!htab->symbian_p)
14597 {
14598 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14599 return FALSE;
14600 #ifndef FOUR_WORD_PLT
14601 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
14602 return FALSE;
14603 #endif
14604 }
14605 }
14606 if ((htab->root.splt && htab->root.splt->size > 0)
14607 || (htab->root.iplt && htab->root.iplt->size > 0))
14608 {
14609 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14610 for (input_bfd = info->input_bfds;
14611 input_bfd != NULL;
14612 input_bfd = input_bfd->link_next)
14613 {
14614 struct arm_local_iplt_info **local_iplt;
14615 unsigned int i, num_syms;
14616
14617 local_iplt = elf32_arm_local_iplt (input_bfd);
14618 if (local_iplt != NULL)
14619 {
14620 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14621 for (i = 0; i < num_syms; i++)
14622 if (local_iplt[i] != NULL
14623 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14624 &local_iplt[i]->root,
14625 &local_iplt[i]->arm))
14626 return FALSE;
14627 }
14628 }
14629 }
14630 if (htab->dt_tlsdesc_plt != 0)
14631 {
14632 /* Mapping symbols for the lazy tls trampoline. */
14633 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
14634 return FALSE;
14635
14636 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14637 htab->dt_tlsdesc_plt + 24))
14638 return FALSE;
14639 }
14640 if (htab->tls_trampoline != 0)
14641 {
14642 /* Mapping symbols for the tls trampoline. */
14643 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
14644 return FALSE;
14645 #ifdef FOUR_WORD_PLT
14646 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14647 htab->tls_trampoline + 12))
14648 return FALSE;
14649 #endif
14650 }
14651
14652 return TRUE;
14653 }
14654
14655 /* Allocate target specific section data. */
14656
14657 static bfd_boolean
14658 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
14659 {
14660 if (!sec->used_by_bfd)
14661 {
14662 _arm_elf_section_data *sdata;
14663 bfd_size_type amt = sizeof (*sdata);
14664
14665 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
14666 if (sdata == NULL)
14667 return FALSE;
14668 sec->used_by_bfd = sdata;
14669 }
14670
14671 return _bfd_elf_new_section_hook (abfd, sec);
14672 }
14673
14674
14675 /* Used to order a list of mapping symbols by address. */
14676
14677 static int
14678 elf32_arm_compare_mapping (const void * a, const void * b)
14679 {
14680 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
14681 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
14682
14683 if (amap->vma > bmap->vma)
14684 return 1;
14685 else if (amap->vma < bmap->vma)
14686 return -1;
14687 else if (amap->type > bmap->type)
14688 /* Ensure results do not depend on the host qsort for objects with
14689 multiple mapping symbols at the same address by sorting on type
14690 after vma. */
14691 return 1;
14692 else if (amap->type < bmap->type)
14693 return -1;
14694 else
14695 return 0;
14696 }
14697
14698 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
14699
14700 static unsigned long
14701 offset_prel31 (unsigned long addr, bfd_vma offset)
14702 {
14703 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
14704 }
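/* For example, offset_prel31 (0x80000010, 8) preserves the high
   (non-prel31) bit and yields 0x80000018: the low 31 bits act as a
   self-contained prel31 field that simply wraps on overflow.  */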
14705
14706 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
14707 relocations. */
14708
14709 static void
14710 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
14711 {
14712 unsigned long first_word = bfd_get_32 (output_bfd, from);
14713 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
14714
14715 /* High bit of first word is supposed to be zero. */
14716 if ((first_word & 0x80000000ul) == 0)
14717 first_word = offset_prel31 (first_word, offset);
14718
14719   /* If the high bit of the second word is clear, and the bit pattern is not 0x1
14720      (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
14721 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
14722 second_word = offset_prel31 (second_word, offset);
14723
14724 bfd_put_32 (output_bfd, first_word, to);
14725 bfd_put_32 (output_bfd, second_word, to + 4);
14726 }
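/* Reminder of the EHABI layout relied on here: each .ARM.exidx entry is
   two words.  The first is a prel31 offset to the function it covers;
   the second is either the literal 1 (EXIDX_CANTUNWIND), an inline
   unwind descriptor (high bit set), or a prel31 offset to an .ARM.extab
   entry (high bit clear).  */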
14727
14728 /* Data for make_branch_to_a8_stub(). */
14729
14730 struct a8_branch_to_stub_data {
14731 asection *writing_section;
14732 bfd_byte *contents;
14733 };
14734
14735
14736 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
14737 places for a particular section. */
14738
14739 static bfd_boolean
14740 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
14741 void *in_arg)
14742 {
14743 struct elf32_arm_stub_hash_entry *stub_entry;
14744 struct a8_branch_to_stub_data *data;
14745 bfd_byte *contents;
14746 unsigned long branch_insn;
14747 bfd_vma veneered_insn_loc, veneer_entry_loc;
14748 bfd_signed_vma branch_offset;
14749 bfd *abfd;
14750 unsigned int target;
14751
14752 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14753 data = (struct a8_branch_to_stub_data *) in_arg;
14754
14755 if (stub_entry->target_section != data->writing_section
14756 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
14757 return TRUE;
14758
14759 contents = data->contents;
14760
14761 veneered_insn_loc = stub_entry->target_section->output_section->vma
14762 + stub_entry->target_section->output_offset
14763 + stub_entry->target_value;
14764
14765 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
14766 + stub_entry->stub_sec->output_offset
14767 + stub_entry->stub_offset;
14768
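  /* Thumb branch offsets are relative to the address of the branch
     plus 4; for BLX the base is additionally aligned down to a word
     boundary, hence the masking below.  */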
14769 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
14770 veneered_insn_loc &= ~3u;
14771
14772 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
14773
14774 abfd = stub_entry->target_section->owner;
14775 target = stub_entry->target_value;
14776
14777 /* We attempt to avoid this condition by setting stubs_always_after_branch
14778 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
14779 This check is just to be on the safe side... */
14780 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
14781 {
14782 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
14783 "allocated in unsafe location"), abfd);
14784 return FALSE;
14785 }
14786
14787 switch (stub_entry->stub_type)
14788 {
14789 case arm_stub_a8_veneer_b:
14790 case arm_stub_a8_veneer_b_cond:
14791 branch_insn = 0xf0009000;
14792 goto jump24;
14793
14794 case arm_stub_a8_veneer_blx:
14795 branch_insn = 0xf000e800;
14796 goto jump24;
14797
14798 case arm_stub_a8_veneer_bl:
14799 {
14800 unsigned int i1, j1, i2, j2, s;
14801
14802 branch_insn = 0xf000d000;
14803
14804 jump24:
14805 if (branch_offset < -16777216 || branch_offset > 16777214)
14806 {
14807 /* There's not much we can do apart from complain if this
14808 happens. */
14809 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
14810 "of range (input file too large)"), abfd);
14811 return FALSE;
14812 }
14813
14814 /* i1 = not(j1 eor s), so:
14815 not i1 = j1 eor s
14816 j1 = (not i1) eor s. */
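	/* For reference, the Thumb-2 B.W/BL/BLX immediate is split as
	   S:imm10 in the first (high) halfword and J1:J2:imm11 in the
	   second, with I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S); the
	   byte offset is SignExtend(S:I1:I2:imm10:imm11:'0').  */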
14817
14818 branch_insn |= (branch_offset >> 1) & 0x7ff;
14819 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
14820 i2 = (branch_offset >> 22) & 1;
14821 i1 = (branch_offset >> 23) & 1;
14822 s = (branch_offset >> 24) & 1;
14823 j1 = (!i1) ^ s;
14824 j2 = (!i2) ^ s;
14825 branch_insn |= j2 << 11;
14826 branch_insn |= j1 << 13;
14827 branch_insn |= s << 26;
14828 }
14829 break;
14830
14831 default:
14832 BFD_FAIL ();
14833 return FALSE;
14834 }
14835
14836 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
14837 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
14838
14839 return TRUE;
14840 }
14841
14842 /* Do code byteswapping. Return FALSE afterwards so that the section is
14843 written out as normal. */
14844
14845 static bfd_boolean
14846 elf32_arm_write_section (bfd *output_bfd,
14847 struct bfd_link_info *link_info,
14848 asection *sec,
14849 bfd_byte *contents)
14850 {
14851 unsigned int mapcount, errcount;
14852 _arm_elf_section_data *arm_data;
14853 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
14854 elf32_arm_section_map *map;
14855 elf32_vfp11_erratum_list *errnode;
14856 bfd_vma ptr;
14857 bfd_vma end;
14858 bfd_vma offset = sec->output_section->vma + sec->output_offset;
14859 bfd_byte tmp;
14860 unsigned int i;
14861
14862 if (globals == NULL)
14863 return FALSE;
14864
14865 /* If this section has not been allocated an _arm_elf_section_data
14866 structure then we cannot record anything. */
14867 arm_data = get_arm_elf_section_data (sec);
14868 if (arm_data == NULL)
14869 return FALSE;
14870
14871 mapcount = arm_data->mapcount;
14872 map = arm_data->map;
14873 errcount = arm_data->erratumcount;
14874
14875 if (errcount != 0)
14876 {
14877 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
14878
14879 for (errnode = arm_data->erratumlist; errnode != 0;
14880 errnode = errnode->next)
14881 {
14882 bfd_vma target = errnode->vma - offset;
14883
14884 switch (errnode->type)
14885 {
14886 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
14887 {
14888 bfd_vma branch_to_veneer;
14889 /* Original condition code of instruction, plus bit mask for
14890 ARM B instruction. */
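	      /* An ARM B<cond> is cond (4 bits) : 0b1010 : signed 24-bit
		 word offset, applied relative to the address of the
		 branch plus 8.  */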
14891 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
14892 | 0x0a000000;
14893
14894 /* The instruction is before the label. */
14895 target -= 4;
14896
14897               /* The 4-byte adjustment above (the branch sits 4 bytes
                       before the label) and the 8-byte ARM PC bias combine
                       into the single -4 below.  */
14898 branch_to_veneer = errnode->u.b.veneer->vma
14899 - errnode->vma - 4;
14900
14901 if ((signed) branch_to_veneer < -(1 << 25)
14902 || (signed) branch_to_veneer >= (1 << 25))
14903 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
14904 "range"), output_bfd);
14905
14906 insn |= (branch_to_veneer >> 2) & 0xffffff;
14907 contents[endianflip ^ target] = insn & 0xff;
14908 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
14909 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
14910 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
14911 }
14912 break;
14913
14914 case VFP11_ERRATUM_ARM_VENEER:
14915 {
14916 bfd_vma branch_from_veneer;
14917 unsigned int insn;
14918
14919 /* Take size of veneer into account. */
14920 branch_from_veneer = errnode->u.v.branch->vma
14921 - errnode->vma - 12;
14922
14923 if ((signed) branch_from_veneer < -(1 << 25)
14924 || (signed) branch_from_veneer >= (1 << 25))
14925 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
14926 "range"), output_bfd);
14927
14928 /* Original instruction. */
14929 insn = errnode->u.v.branch->u.b.vfp_insn;
14930 contents[endianflip ^ target] = insn & 0xff;
14931 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
14932 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
14933 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
14934
14935 /* Branch back to insn after original insn. */
14936 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
14937 contents[endianflip ^ (target + 4)] = insn & 0xff;
14938 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
14939 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
14940 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
14941 }
14942 break;
14943
14944 default:
14945 abort ();
14946 }
14947 }
14948 }
14949
14950 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
14951 {
14952 arm_unwind_table_edit *edit_node
14953 = arm_data->u.exidx.unwind_edit_list;
14954       /* Now, sec->size is the size of the section we will write.  The original
14955          size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
14956          markers) was sec->rawsize.  (If we performed no edits, rawsize will be
14957          zero and we should use size instead.)  */
14958 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
14959 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
14960 unsigned int in_index, out_index;
14961 bfd_vma add_to_offsets = 0;
14962
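      /* Walk the input entries and the edit list in parallel.  Deleting
	 an entry moves every later entry 8 bytes lower in the output, so
	 the prel31 offsets they hold must grow by 8; inserting an
	 EXIDX_CANTUNWIND entry moves them 8 bytes higher, so the offsets
	 shrink by 8.  add_to_offsets accumulates this bias.  */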
14963 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
14964 {
14965 if (edit_node)
14966 {
14967 unsigned int edit_index = edit_node->index;
14968
14969 if (in_index < edit_index && in_index * 8 < input_size)
14970 {
14971 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
14972 contents + in_index * 8, add_to_offsets);
14973 out_index++;
14974 in_index++;
14975 }
14976 else if (in_index == edit_index
14977 || (in_index * 8 >= input_size
14978 && edit_index == UINT_MAX))
14979 {
14980 switch (edit_node->type)
14981 {
14982 case DELETE_EXIDX_ENTRY:
14983 in_index++;
14984 add_to_offsets += 8;
14985 break;
14986
14987 case INSERT_EXIDX_CANTUNWIND_AT_END:
14988 {
14989 asection *text_sec = edit_node->linked_section;
14990 bfd_vma text_offset = text_sec->output_section->vma
14991 + text_sec->output_offset
14992 + text_sec->size;
14993 bfd_vma exidx_offset = offset + out_index * 8;
14994 unsigned long prel31_offset;
14995
14996 /* Note: this is meant to be equivalent to an
14997 R_ARM_PREL31 relocation. These synthetic
14998 EXIDX_CANTUNWIND markers are not relocated by the
14999 usual BFD method. */
15000 prel31_offset = (text_offset - exidx_offset)
15001 & 0x7ffffffful;
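		    /* For example, text ending at 0x8000 with this entry
		       at 0x10000 gives (0x8000 - 0x10000) & 0x7fffffff
		       = 0x7fff8000, i.e. a negative 31-bit offset.  */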
15002
15003 /* First address we can't unwind. */
15004 bfd_put_32 (output_bfd, prel31_offset,
15005 &edited_contents[out_index * 8]);
15006
15007 /* Code for EXIDX_CANTUNWIND. */
15008 bfd_put_32 (output_bfd, 0x1,
15009 &edited_contents[out_index * 8 + 4]);
15010
15011 out_index++;
15012 add_to_offsets -= 8;
15013 }
15014 break;
15015 }
15016
15017 edit_node = edit_node->next;
15018 }
15019 }
15020 else
15021 {
15022 /* No more edits, copy remaining entries verbatim. */
15023 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15024 contents + in_index * 8, add_to_offsets);
15025 out_index++;
15026 in_index++;
15027 }
15028 }
15029
15030 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15031 bfd_set_section_contents (output_bfd, sec->output_section,
15032 edited_contents,
15033 (file_ptr) sec->output_offset, sec->size);
15034
15035 return TRUE;
15036 }
15037
15038 /* Fix code to point to Cortex-A8 erratum stubs. */
15039 if (globals->fix_cortex_a8)
15040 {
15041 struct a8_branch_to_stub_data data;
15042
15043 data.writing_section = sec;
15044 data.contents = contents;
15045
15046 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15047 &data);
15048 }
15049
15050 if (mapcount == 0)
15051 return FALSE;
15052
15053 if (globals->byteswap_code)
15054 {
15055 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15056
15057 ptr = map[0].vma;
15058 for (i = 0; i < mapcount; i++)
15059 {
15060 if (i == mapcount - 1)
15061 end = sec->size;
15062 else
15063 end = map[i + 1].vma;
15064
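	  /* map[i].type holds the mapping symbol class defined by the
	     ARM ELF ABI: $a marks ARM code, $t Thumb code and $d data;
	     only code is byte-swapped here.  */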
15065 switch (map[i].type)
15066 {
15067 case 'a':
15068 /* Byte swap code words. */
15069 while (ptr + 3 < end)
15070 {
15071 tmp = contents[ptr];
15072 contents[ptr] = contents[ptr + 3];
15073 contents[ptr + 3] = tmp;
15074 tmp = contents[ptr + 1];
15075 contents[ptr + 1] = contents[ptr + 2];
15076 contents[ptr + 2] = tmp;
15077 ptr += 4;
15078 }
15079 break;
15080
15081 case 't':
15082 /* Byte swap code halfwords. */
15083 while (ptr + 1 < end)
15084 {
15085 tmp = contents[ptr];
15086 contents[ptr] = contents[ptr + 1];
15087 contents[ptr + 1] = tmp;
15088 ptr += 2;
15089 }
15090 break;
15091
15092 case 'd':
15093 /* Leave data alone. */
15094 break;
15095 }
15096 ptr = end;
15097 }
15098 }
15099
15100 free (map);
15101 arm_data->mapcount = -1;
15102 arm_data->mapsize = 0;
15103 arm_data->map = NULL;
15104
15105 return FALSE;
15106 }
15107
15108 /* Mangle thumb function symbols as we read them in. */
15109
15110 static bfd_boolean
15111 elf32_arm_swap_symbol_in (bfd * abfd,
15112 const void *psrc,
15113 const void *pshn,
15114 Elf_Internal_Sym *dst)
15115 {
15116 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15117 return FALSE;
15118
15119 /* New EABI objects mark thumb function symbols by setting the low bit of
15120 the address. */
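  /* E.g. a Thumb function at address 0x8000 arrives with st_value
     0x8001; we record the Thumbness in st_target_internal and clear
     the low bit so the rest of BFD sees the real address.  */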
15121 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC
15122 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15123 && (dst->st_value & 1))
15124 {
15125 dst->st_value &= ~(bfd_vma) 1;
15126 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15127 }
15128 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15129 {
15130 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15131 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15132 }
15133 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15134 dst->st_target_internal = ST_BRANCH_LONG;
15135 else
15136 dst->st_target_internal = ST_BRANCH_TO_ARM;
15137
15138 return TRUE;
15139 }
15140
15141
15142 /* Mangle thumb function symbols as we write them out. */
15143
15144 static void
15145 elf32_arm_swap_symbol_out (bfd *abfd,
15146 const Elf_Internal_Sym *src,
15147 void *cdst,
15148 void *shndx)
15149 {
15150 Elf_Internal_Sym newsym;
15151
15152 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15153 of the address set, as per the new EABI. We do this unconditionally
15154 because objcopy does not set the elf header flags until after
15155 it writes out the symbol table. */
15156 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15157 {
15158 newsym = *src;
15159 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15160 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15161 if (newsym.st_shndx != SHN_UNDEF)
15162 {
15163           /* Do this only for defined symbols.  At link time, the static
15164              linker will simulate the work of the dynamic linker in resolving
15165              symbols and will carry over the Thumbness of found symbols to
15166              the output symbol table.  It's not clear how it happens, but
15167              the Thumbness of undefined symbols can well be different at
15168              runtime, and writing '1' for them would be confusing for users
15169              and possibly for the dynamic linker itself.
15170           */
15171 newsym.st_value |= 1;
15172 }
15173
15174 src = &newsym;
15175 }
15176 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15177 }
15178
15179 /* Add the PT_ARM_EXIDX program header. */
15180
15181 static bfd_boolean
15182 elf32_arm_modify_segment_map (bfd *abfd,
15183 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15184 {
15185 struct elf_segment_map *m;
15186 asection *sec;
15187
15188 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15189 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15190 {
15191 /* If there is already a PT_ARM_EXIDX header, then we do not
15192 want to add another one. This situation arises when running
15193 "strip"; the input binary already has the header. */
15194 m = elf_tdata (abfd)->segment_map;
15195 while (m && m->p_type != PT_ARM_EXIDX)
15196 m = m->next;
15197 if (!m)
15198 {
15199 m = (struct elf_segment_map *)
15200 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15201 if (m == NULL)
15202 return FALSE;
15203 m->p_type = PT_ARM_EXIDX;
15204 m->count = 1;
15205 m->sections[0] = sec;
15206
15207 m->next = elf_tdata (abfd)->segment_map;
15208 elf_tdata (abfd)->segment_map = m;
15209 }
15210 }
15211
15212 return TRUE;
15213 }
15214
15215 /* We may add a PT_ARM_EXIDX program header. */
15216
15217 static int
15218 elf32_arm_additional_program_headers (bfd *abfd,
15219 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15220 {
15221 asection *sec;
15222
15223 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15224 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15225 return 1;
15226 else
15227 return 0;
15228 }
15229
15230 /* Hook called by the linker routine which adds symbols from an object
15231 file. */
15232
15233 static bfd_boolean
15234 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15235 Elf_Internal_Sym *sym, const char **namep,
15236 flagword *flagsp, asection **secp, bfd_vma *valp)
15237 {
15238 if ((abfd->flags & DYNAMIC) == 0
15239 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15240 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15241 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15242
15243 if (elf32_arm_hash_table (info)->vxworks_p
15244 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15245 flagsp, secp, valp))
15246 return FALSE;
15247
15248 return TRUE;
15249 }
15250
15251 /* We use this to override swap_symbol_in and swap_symbol_out. */
15252 const struct elf_size_info elf32_arm_size_info =
15253 {
15254 sizeof (Elf32_External_Ehdr),
15255 sizeof (Elf32_External_Phdr),
15256 sizeof (Elf32_External_Shdr),
15257 sizeof (Elf32_External_Rel),
15258 sizeof (Elf32_External_Rela),
15259 sizeof (Elf32_External_Sym),
15260 sizeof (Elf32_External_Dyn),
15261 sizeof (Elf_External_Note),
15262 4,
15263 1,
15264 32, 2,
15265 ELFCLASS32, EV_CURRENT,
15266 bfd_elf32_write_out_phdrs,
15267 bfd_elf32_write_shdrs_and_ehdr,
15268 bfd_elf32_checksum_contents,
15269 bfd_elf32_write_relocs,
15270 elf32_arm_swap_symbol_in,
15271 elf32_arm_swap_symbol_out,
15272 bfd_elf32_slurp_reloc_table,
15273 bfd_elf32_slurp_symbol_table,
15274 bfd_elf32_swap_dyn_in,
15275 bfd_elf32_swap_dyn_out,
15276 bfd_elf32_swap_reloc_in,
15277 bfd_elf32_swap_reloc_out,
15278 bfd_elf32_swap_reloca_in,
15279 bfd_elf32_swap_reloca_out
15280 };
15281
15282 #define ELF_ARCH bfd_arch_arm
15283 #define ELF_TARGET_ID ARM_ELF_DATA
15284 #define ELF_MACHINE_CODE EM_ARM
15285 #ifdef __QNXTARGET__
15286 #define ELF_MAXPAGESIZE 0x1000
15287 #else
15288 #define ELF_MAXPAGESIZE 0x8000
15289 #endif
15290 #define ELF_MINPAGESIZE 0x1000
15291 #define ELF_COMMONPAGESIZE 0x1000
15292
15293 #define bfd_elf32_mkobject elf32_arm_mkobject
15294
15295 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15296 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15297 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15298 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15299 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15300 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15301 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15302 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15303 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15304 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15305 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15306 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15307 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15308
15309 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15310 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15311 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15312 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15313 #define elf_backend_check_relocs elf32_arm_check_relocs
15314 #define elf_backend_relocate_section elf32_arm_relocate_section
15315 #define elf_backend_write_section elf32_arm_write_section
15316 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15317 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15318 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15319 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15320 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15321 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15322 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15323 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15324 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15325 #define elf_backend_object_p elf32_arm_object_p
15326 #define elf_backend_section_flags elf32_arm_section_flags
15327 #define elf_backend_fake_sections elf32_arm_fake_sections
15328 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15329 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15330 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15331 #define elf_backend_size_info elf32_arm_size_info
15332 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15333 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15334 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15335 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15336 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15337
15338 #define elf_backend_can_refcount 1
15339 #define elf_backend_can_gc_sections 1
15340 #define elf_backend_plt_readonly 1
15341 #define elf_backend_want_got_plt 1
15342 #define elf_backend_want_plt_sym 0
15343 #define elf_backend_may_use_rel_p 1
15344 #define elf_backend_may_use_rela_p 0
15345 #define elf_backend_default_use_rela_p 0
15346
15347 #define elf_backend_got_header_size 12
15348
15349 #undef elf_backend_obj_attrs_vendor
15350 #define elf_backend_obj_attrs_vendor "aeabi"
15351 #undef elf_backend_obj_attrs_section
15352 #define elf_backend_obj_attrs_section ".ARM.attributes"
15353 #undef elf_backend_obj_attrs_arg_type
15354 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15355 #undef elf_backend_obj_attrs_section_type
15356 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15357 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15358 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15359
15360 #include "elf32-target.h"
15361
15362 /* VxWorks Targets. */
15363
15364 #undef TARGET_LITTLE_SYM
15365 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15366 #undef TARGET_LITTLE_NAME
15367 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15368 #undef TARGET_BIG_SYM
15369 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15370 #undef TARGET_BIG_NAME
15371 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15372
15373 /* Like elf32_arm_link_hash_table_create -- but overrides
15374 appropriately for VxWorks. */
15375
15376 static struct bfd_link_hash_table *
15377 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15378 {
15379 struct bfd_link_hash_table *ret;
15380
15381 ret = elf32_arm_link_hash_table_create (abfd);
15382 if (ret)
15383 {
15384 struct elf32_arm_link_hash_table *htab
15385 = (struct elf32_arm_link_hash_table *) ret;
15386 htab->use_rel = 0;
15387 htab->vxworks_p = 1;
15388 }
15389 return ret;
15390 }
15391
15392 static void
15393 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15394 {
15395 elf32_arm_final_write_processing (abfd, linker);
15396 elf_vxworks_final_write_processing (abfd, linker);
15397 }
15398
15399 #undef elf32_bed
15400 #define elf32_bed elf32_arm_vxworks_bed
15401
15402 #undef bfd_elf32_bfd_link_hash_table_create
15403 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15404 #undef elf_backend_final_write_processing
15405 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15406 #undef elf_backend_emit_relocs
15407 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15408
15409 #undef elf_backend_may_use_rel_p
15410 #define elf_backend_may_use_rel_p 0
15411 #undef elf_backend_may_use_rela_p
15412 #define elf_backend_may_use_rela_p 1
15413 #undef elf_backend_default_use_rela_p
15414 #define elf_backend_default_use_rela_p 1
15415 #undef elf_backend_want_plt_sym
15416 #define elf_backend_want_plt_sym 1
15417 #undef ELF_MAXPAGESIZE
15418 #define ELF_MAXPAGESIZE 0x1000
15419
15420 #include "elf32-target.h"
15421
15422
15423 /* Merge backend specific data from an object file to the output
15424 object file when linking. */
15425
15426 static bfd_boolean
15427 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15428 {
15429 flagword out_flags;
15430 flagword in_flags;
15431 bfd_boolean flags_compatible = TRUE;
15432 asection *sec;
15433
15434   /* Check if we have the same endianness.  */
15435 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15436 return FALSE;
15437
15438 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
15439 return TRUE;
15440
15441 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15442 return FALSE;
15443
15444 /* The input BFD must have had its flags initialised. */
15445 /* The following seems bogus to me -- The flags are initialized in
15446 the assembler but I don't think an elf_flags_init field is
15447 written into the object. */
15448 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15449
15450 in_flags = elf_elfheader (ibfd)->e_flags;
15451 out_flags = elf_elfheader (obfd)->e_flags;
15452
15453 /* In theory there is no reason why we couldn't handle this. However
15454 in practice it isn't even close to working and there is no real
15455 reason to want it. */
15456 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15457 && !(ibfd->flags & DYNAMIC)
15458 && (in_flags & EF_ARM_BE8))
15459 {
15460 _bfd_error_handler (_("error: %B is already in final BE8 format"),
15461 ibfd);
15462 return FALSE;
15463 }
15464
15465 if (!elf_flags_init (obfd))
15466 {
15467       /* If the input is the default architecture and had the default
15468          flags then do not bother setting the flags for the output
15469          architecture; instead allow future merges to do this.  If no
15470          future merges ever set these flags then they will retain their
15471          uninitialised values which, surprise surprise, correspond
15472          to the default values.  */
15473 if (bfd_get_arch_info (ibfd)->the_default
15474 && elf_elfheader (ibfd)->e_flags == 0)
15475 return TRUE;
15476
15477 elf_flags_init (obfd) = TRUE;
15478 elf_elfheader (obfd)->e_flags = in_flags;
15479
15480 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15481 && bfd_get_arch_info (obfd)->the_default)
15482 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15483
15484 return TRUE;
15485 }
15486
15487 /* Determine what should happen if the input ARM architecture
15488 does not match the output ARM architecture. */
15489 if (! bfd_arm_merge_machines (ibfd, obfd))
15490 return FALSE;
15491
15492 /* Identical flags must be compatible. */
15493 if (in_flags == out_flags)
15494 return TRUE;
15495
15496 /* Check to see if the input BFD actually contains any sections. If
15497 not, its flags may not have been initialised either, but it
15498      cannot actually cause any incompatibility.  Do not short-circuit
15499 dynamic objects; their section list may be emptied by
15500 elf_link_add_object_symbols.
15501
15502 Also check to see if there are no code sections in the input.
15503 In this case there is no need to check for code specific flags.
15504      XXX - do we need to worry about floating-point format compatibility
15505      in data sections?  */
15506 if (!(ibfd->flags & DYNAMIC))
15507 {
15508 bfd_boolean null_input_bfd = TRUE;
15509 bfd_boolean only_data_sections = TRUE;
15510
15511 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15512 {
15513 /* Ignore synthetic glue sections. */
15514 if (strcmp (sec->name, ".glue_7")
15515 && strcmp (sec->name, ".glue_7t"))
15516 {
15517 if ((bfd_get_section_flags (ibfd, sec)
15518 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15519 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15520 only_data_sections = FALSE;
15521
15522 null_input_bfd = FALSE;
15523 break;
15524 }
15525 }
15526
15527 if (null_input_bfd || only_data_sections)
15528 return TRUE;
15529 }
15530
15531 /* Complain about various flag mismatches. */
15532 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15533 EF_ARM_EABI_VERSION (out_flags)))
15534 {
15535 _bfd_error_handler
15536 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15537 ibfd, obfd,
15538 (in_flags & EF_ARM_EABIMASK) >> 24,
15539 (out_flags & EF_ARM_EABIMASK) >> 24);
15540 return FALSE;
15541 }
15542
15543 /* Not sure what needs to be checked for EABI versions >= 1. */
15544 /* VxWorks libraries do not use these flags. */
15545 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15546 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15547 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15548 {
15549 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15550 {
15551 _bfd_error_handler
15552 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15553 ibfd, obfd,
15554 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15555 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15556 flags_compatible = FALSE;
15557 }
15558
15559 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
15560 {
15561 if (in_flags & EF_ARM_APCS_FLOAT)
15562 _bfd_error_handler
15563 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
15564 ibfd, obfd);
15565 else
15566 _bfd_error_handler
15567 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
15568 ibfd, obfd);
15569
15570 flags_compatible = FALSE;
15571 }
15572
15573 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
15574 {
15575 if (in_flags & EF_ARM_VFP_FLOAT)
15576 _bfd_error_handler
15577 (_("error: %B uses VFP instructions, whereas %B does not"),
15578 ibfd, obfd);
15579 else
15580 _bfd_error_handler
15581 (_("error: %B uses FPA instructions, whereas %B does not"),
15582 ibfd, obfd);
15583
15584 flags_compatible = FALSE;
15585 }
15586
15587 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
15588 {
15589 if (in_flags & EF_ARM_MAVERICK_FLOAT)
15590 _bfd_error_handler
15591 (_("error: %B uses Maverick instructions, whereas %B does not"),
15592 ibfd, obfd);
15593 else
15594 _bfd_error_handler
15595 (_("error: %B does not use Maverick instructions, whereas %B does"),
15596 ibfd, obfd);
15597
15598 flags_compatible = FALSE;
15599 }
15600
15601 #ifdef EF_ARM_SOFT_FLOAT
15602 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
15603 {
15604 /* We can allow interworking between code that is VFP format
15605 layout, and uses either soft float or integer regs for
15606 passing floating point arguments and results. We already
15607 know that the APCS_FLOAT flags match; similarly for VFP
15608 flags. */
15609 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
15610 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
15611 {
15612 if (in_flags & EF_ARM_SOFT_FLOAT)
15613 _bfd_error_handler
15614 (_("error: %B uses software FP, whereas %B uses hardware FP"),
15615 ibfd, obfd);
15616 else
15617 _bfd_error_handler
15618 (_("error: %B uses hardware FP, whereas %B uses software FP"),
15619 ibfd, obfd);
15620
15621 flags_compatible = FALSE;
15622 }
15623 }
15624 #endif
15625
15626 /* Interworking mismatch is only a warning. */
15627 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
15628 {
15629 if (in_flags & EF_ARM_INTERWORK)
15630 {
15631 _bfd_error_handler
15632 (_("Warning: %B supports interworking, whereas %B does not"),
15633 ibfd, obfd);
15634 }
15635 else
15636 {
15637 _bfd_error_handler
15638 (_("Warning: %B does not support interworking, whereas %B does"),
15639 ibfd, obfd);
15640 }
15641 }
15642 }
15643
15644 return flags_compatible;
15645 }
15646
15647
15648 /* Symbian OS Targets. */
15649
15650 #undef TARGET_LITTLE_SYM
15651 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
15652 #undef TARGET_LITTLE_NAME
15653 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
15654 #undef TARGET_BIG_SYM
15655 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
15656 #undef TARGET_BIG_NAME
15657 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
15658
15659 /* Like elf32_arm_link_hash_table_create -- but overrides
15660 appropriately for Symbian OS. */
15661
15662 static struct bfd_link_hash_table *
15663 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
15664 {
15665 struct bfd_link_hash_table *ret;
15666
15667 ret = elf32_arm_link_hash_table_create (abfd);
15668 if (ret)
15669 {
15670 struct elf32_arm_link_hash_table *htab
15671 = (struct elf32_arm_link_hash_table *)ret;
15672 /* There is no PLT header for Symbian OS. */
15673 htab->plt_header_size = 0;
15674 /* The PLT entries are each one instruction and one word. */
15675 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
15676 htab->symbian_p = 1;
15677 /* Symbian uses armv5t or above, so use_blx is always true. */
15678 htab->use_blx = 1;
15679 htab->root.is_relocatable_executable = 1;
15680 }
15681 return ret;
15682 }
15683
15684 static const struct bfd_elf_special_section
15685 elf32_arm_symbian_special_sections[] =
15686 {
15687 /* In a BPABI executable, the dynamic linking sections do not go in
15688 the loadable read-only segment. The post-linker may wish to
15689 refer to these sections, but they are not part of the final
15690 program image. */
15691 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
15692 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
15693 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
15694 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
15695 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
15696 /* These sections do not need to be writable as the SymbianOS
15697 postlinker will arrange things so that no dynamic relocation is
15698 required. */
15699 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
15700 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
15701 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
15702 { NULL, 0, 0, 0, 0 }
15703 };
15704
15705 static void
15706 elf32_arm_symbian_begin_write_processing (bfd *abfd,
15707 struct bfd_link_info *link_info)
15708 {
15709 /* BPABI objects are never loaded directly by an OS kernel; they are
15710 processed by a postlinker first, into an OS-specific format. If
15711 the D_PAGED bit is set on the file, BFD will align segments on
15712 page boundaries, so that an OS can directly map the file. With
15713 BPABI objects, that just results in wasted space. In addition,
15714 because we clear the D_PAGED bit, map_sections_to_segments will
15715 recognize that the program headers should not be mapped into any
15716 loadable segment. */
15717 abfd->flags &= ~D_PAGED;
15718 elf32_arm_begin_write_processing (abfd, link_info);
15719 }
15720
15721 static bfd_boolean
15722 elf32_arm_symbian_modify_segment_map (bfd *abfd,
15723 struct bfd_link_info *info)
15724 {
15725 struct elf_segment_map *m;
15726 asection *dynsec;
15727
15728 /* BPABI shared libraries and executables should have a PT_DYNAMIC
15729 segment. However, because the .dynamic section is not marked
15730 with SEC_LOAD, the generic ELF code will not create such a
15731 segment. */
15732 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
15733 if (dynsec)
15734 {
15735 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
15736 if (m->p_type == PT_DYNAMIC)
15737 break;
15738
15739 if (m == NULL)
15740 {
15741 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
15742 m->next = elf_tdata (abfd)->segment_map;
15743 elf_tdata (abfd)->segment_map = m;
15744 }
15745 }
15746
15747 /* Also call the generic arm routine. */
15748 return elf32_arm_modify_segment_map (abfd, info);
15749 }
15750
15751 /* Return address for Ith PLT stub in section PLT, for relocation REL
15752 or (bfd_vma) -1 if it should not be included. */
15753
15754 static bfd_vma
15755 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
15756 const arelent *rel ATTRIBUTE_UNUSED)
15757 {
15758 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
15759 }
15760
15761
15762 #undef elf32_bed
15763 #define elf32_bed elf32_arm_symbian_bed
15764
15765 /* The dynamic sections are not allocated on SymbianOS; the postlinker
15766 will process them and then discard them. */
15767 #undef ELF_DYNAMIC_SEC_FLAGS
15768 #define ELF_DYNAMIC_SEC_FLAGS \
15769 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
15770
15771 #undef elf_backend_emit_relocs
15772
15773 #undef bfd_elf32_bfd_link_hash_table_create
15774 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
15775 #undef elf_backend_special_sections
15776 #define elf_backend_special_sections elf32_arm_symbian_special_sections
15777 #undef elf_backend_begin_write_processing
15778 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
15779 #undef elf_backend_final_write_processing
15780 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15781
15782 #undef elf_backend_modify_segment_map
15783 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
15784
15785 /* There is no .got section for BPABI objects, and hence no header. */
15786 #undef elf_backend_got_header_size
15787 #define elf_backend_got_header_size 0
15788
15789 /* Similarly, there is no .got.plt section. */
15790 #undef elf_backend_want_got_plt
15791 #define elf_backend_want_got_plt 0
15792
15793 #undef elf_backend_plt_sym_val
15794 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
15795
15796 #undef elf_backend_may_use_rel_p
15797 #define elf_backend_may_use_rel_p 1
15798 #undef elf_backend_may_use_rela_p
15799 #define elf_backend_may_use_rela_p 0
15800 #undef elf_backend_default_use_rela_p
15801 #define elf_backend_default_use_rela_p 0
15802 #undef elf_backend_want_plt_sym
15803 #define elf_backend_want_plt_sym 0
15804 #undef ELF_MAXPAGESIZE
15805 #define ELF_MAXPAGESIZE 0x8000
15806
15807 #include "elf32-target.h"
This page took 0.533038 seconds and 4 git commands to generate.