1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
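/* Illustrative example (not part of the original source): for a hash table
   "htab" describing a REL-style target (use_rel set),

     const char *name = RELOC_SECTION (htab, ".text");

   evaluates to the string ".rel.text", and to ".rela.text" on a RELA-style
   target; the adjacent string literals are concatenated by the compiler.  */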
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
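/* Illustrative sketch (not part of the original source) of how RELOC_SIZE,
   SWAP_RELOC_IN and SWAP_RELOC_OUT are typically combined when walking an
   external relocation section; "abfd", "htab", "contents" and "count" are
   hypothetical locals:

     bfd_byte *erel = contents;
     unsigned int i;

     for (i = 0; i < count; i++, erel += RELOC_SIZE (htab))
       {
         Elf_Internal_Rela irel;

         SWAP_RELOC_IN (htab) (abfd, erel, &irel);
         ... adjust the fields of irel here ...
         SWAP_RELOC_OUT (htab) (abfd, &irel, erel);
       }

   RELOC_SIZE selects the external Rel/Rela entry size, and the SWAP_RELOC_*
   macros pick the matching byte-swapping routines.  */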
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24 HOWTO
71 in that slot. */
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 24, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
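/* Illustrative example (not part of the original source): a hypothetical
   caller can turn a raw relocation number into its printable name:

     reloc_howto_type *howto = elf32_arm_howto_from_type (R_ARM_ABS32);
     const char *name = howto != NULL ? howto->name : "<unknown>";

   Here "name" is "R_ARM_ABS32".  Numbers covered by neither table (those
   between the end of elf32_arm_howto_table_1 and R_ARM_RREL32) make the
   function return NULL, so callers must check before dereferencing.  */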
1681
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
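/* Illustrative note (not part of the original source): in ELF32 the r_info
   word packs the symbol index in its upper 24 bits and the relocation type
   in its low 8 bits, so for a hypothetical entry built with

     rel.r_info = ELF32_R_INFO (sym_index, R_ARM_CALL);

   ELF32_R_TYPE (rel.r_info) recovers R_ARM_CALL and the howto pointer is
   taken from elf32_arm_howto_table_1[R_ARM_CALL].  */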
1691
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
1698 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1725 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1726 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1727 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1728 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1729 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1730 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1731 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1732 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1733 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1734 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1735 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1736 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1737 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1738 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1739 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1740 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1741 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1742 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1743 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1744 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1745 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1746 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1747 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1748 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1750 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1751 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1752 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1754 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1755 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1756 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1757 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1758 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1759 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1760 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1761 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1762 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1763 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1764 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1765 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1766 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1768 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1769 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1770 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1771 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1772 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1773 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1774 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1775 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1776 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1777 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1778 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1779 };
1780
1781 static reloc_howto_type *
1782 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1783 bfd_reloc_code_real_type code)
1784 {
1785 unsigned int i;
1786
1787 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1788 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1789 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1790
1791 return NULL;
1792 }
1793
1794 static reloc_howto_type *
1795 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1796 const char *r_name)
1797 {
1798 unsigned int i;
1799
1800 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1801 if (elf32_arm_howto_table_1[i].name != NULL
1802 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1803 return &elf32_arm_howto_table_1[i];
1804
1805 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1806 if (elf32_arm_howto_table_2[i].name != NULL
1807 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1808 return &elf32_arm_howto_table_2[i];
1809
1810 return NULL;
1811 }
1812
1813 /* Support for core dump NOTE sections. */
1814
1815 static bfd_boolean
1816 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1817 {
1818 int offset;
1819 size_t size;
1820
1821 switch (note->descsz)
1822 {
1823 default:
1824 return FALSE;
1825
1826 case 148: /* Linux/ARM 32-bit. */
1827 /* pr_cursig */
1828 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1829
1830 /* pr_pid */
1831 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1832
1833 /* pr_reg */
1834 offset = 72;
1835 size = 72;
1836
1837 break;
1838 }
1839
1840 /* Make a ".reg/999" section. */
1841 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1842 size, note->descpos + offset);
1843 }
1844
1845 static bfd_boolean
1846 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1847 {
1848 switch (note->descsz)
1849 {
1850 default:
1851 return FALSE;
1852
1853 case 124: /* Linux/ARM elf_prpsinfo. */
1854 elf_tdata (abfd)->core_program
1855 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1856 elf_tdata (abfd)->core_command
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1858 }
1859
1860   /* Note that some implementations (at least one, anyway) tack a
1861      spurious space onto the end of the args, so strip it off if it
1862      is present.  */
1863 {
1864 char *command = elf_tdata (abfd)->core_command;
1865 int n = strlen (command);
1866
1867 if (0 < n && command[n - 1] == ' ')
1868 command[n - 1] = '\0';
1869 }
1870
1871 return TRUE;
1872 }
1873
1874 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1875 #define TARGET_LITTLE_NAME "elf32-littlearm"
1876 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1877 #define TARGET_BIG_NAME "elf32-bigarm"
1878
1879 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1880 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1881
1882 typedef unsigned long int insn32;
1883 typedef unsigned short int insn16;
1884
1885 /* In lieu of proper flags, assume all EABIv4 or later objects are
1886 interworkable. */
1887 #define INTERWORK_FLAG(abfd) \
1888 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1889 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1890 || ((abfd)->flags & BFD_LINKER_CREATED))
1891
1892 /* The linker script knows the section names for placement.
1893 The entry_names are used to do simple name mangling on the stubs.
1894 Given a function name, and its type, the stub can be found. The
1895 name can be changed.  The only requirement is that the %s be present.  */
1896 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1897 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1898
1899 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1900 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1901
1902 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1903 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1904
1905 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1906 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1907
1908 #define STUB_ENTRY_NAME "__%s_veneer"
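/* For example (assuming a target function named "foo"): a Thumb->ARM
   interworking call that needs glue uses the entry "__foo_from_thumb" placed
   in ".glue_7t", an ARM->Thumb call uses "__foo_from_arm" in ".glue_7", and a
   generic long-branch veneer for "foo" is named "__foo_veneer".  */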
1909
1910 /* The name of the dynamic interpreter. This is put in the .interp
1911 section. */
1912 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1913
1914 #ifdef FOUR_WORD_PLT
1915
1916 /* The first entry in a procedure linkage table looks like
1917 this. It is set up so that any shared library function that is
1918 called before the relocation has been set up calls the dynamic
1919 linker first. */
1920 static const bfd_vma elf32_arm_plt0_entry [] =
1921 {
1922 0xe52de004, /* str lr, [sp, #-4]! */
1923 0xe59fe010, /* ldr lr, [pc, #16] */
1924 0xe08fe00e, /* add lr, pc, lr */
1925 0xe5bef008, /* ldr pc, [lr, #8]! */
1926 };
1927
1928 /* Subsequent entries in a procedure linkage table look like
1929 this. */
1930 static const bfd_vma elf32_arm_plt_entry [] =
1931 {
1932 0xe28fc600, /* add ip, pc, #NN */
1933 0xe28cca00, /* add ip, ip, #NN */
1934 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1935 0x00000000, /* unused */
1936 };
1937
1938 #else
1939
1940 /* The first entry in a procedure linkage table looks like
1941 this. It is set up so that any shared library function that is
1942 called before the relocation has been set up calls the dynamic
1943 linker first. */
1944 static const bfd_vma elf32_arm_plt0_entry [] =
1945 {
1946 0xe52de004, /* str lr, [sp, #-4]! */
1947 0xe59fe004, /* ldr lr, [pc, #4] */
1948 0xe08fe00e, /* add lr, pc, lr */
1949 0xe5bef008, /* ldr pc, [lr, #8]! */
1950 0x00000000, /* &GOT[0] - . */
1951 };
1952
1953 /* Subsequent entries in a procedure linkage table look like
1954 this. */
1955 static const bfd_vma elf32_arm_plt_entry [] =
1956 {
1957 0xe28fc600, /* add ip, pc, #0xNN00000 */
1958 0xe28cca00, /* add ip, ip, #0xNN000 */
1959 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1960 };
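/* A sketch of how the three-instruction entry above is filled in: the
   PC-relative displacement from the entry to its GOT slot is, roughly, split
   across the three instructions, with the high bits [27:20] going into the
   first add, bits [19:12] into the second add and bits [11:0] into the ldr's
   immediate (hence the #0xNN00000 / #0xNN000 / #0xNNN placeholders).  */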
1961
1962 #endif
1963
1964 /* The format of the first entry in the procedure linkage table
1965 for a VxWorks executable. */
1966 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1967 {
1968 0xe52dc008, /* str ip,[sp,#-8]! */
1969 0xe59fc000, /* ldr ip,[pc] */
1970 0xe59cf008, /* ldr pc,[ip,#8] */
1971 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1972 };
1973
1974 /* The format of subsequent entries in a VxWorks executable. */
1975 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1976 {
1977 0xe59fc000, /* ldr ip,[pc] */
1978 0xe59cf000, /* ldr pc,[ip] */
1979 0x00000000, /* .long @got */
1980 0xe59fc000, /* ldr ip,[pc] */
1981 0xea000000, /* b _PLT */
1982 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1983 };
1984
1985 /* The format of entries in a VxWorks shared library. */
1986 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1987 {
1988 0xe59fc000, /* ldr ip,[pc] */
1989 0xe79cf009, /* ldr pc,[ip,r9] */
1990 0x00000000, /* .long @got */
1991 0xe59fc000, /* ldr ip,[pc] */
1992 0xe599f008, /* ldr pc,[r9,#8] */
1993 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1994 };
1995
1996 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1997 #define PLT_THUMB_STUB_SIZE 4
1998 static const bfd_vma elf32_arm_plt_thumb_stub [] =
1999 {
2000 0x4778, /* bx pc */
2001 0x46c0 /* nop */
2002 };
2003
2004 /* The entries in a PLT when using a DLL-based target with multiple
2005 address spaces. */
2006 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2007 {
2008 0xe51ff004, /* ldr pc, [pc, #-4] */
2009 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2010 };
2011
2012 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2013 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2014 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2015 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2016 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2017 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
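/* These limits follow from the branch encodings: the offset range that can be
   encoded in the instruction, adjusted for the PC-read bias (the PC reads 8
   bytes ahead of an ARM branch and 4 bytes ahead of a Thumb branch).  For
   example, an ARM B/BL has a signed 24-bit immediate shifted left by 2, which
   gives the (((1 << 23) - 1) << 2) + 8 forward limit above.  */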
2018
2019 enum stub_insn_type
2020 {
2021 THUMB16_TYPE = 1,
2022 THUMB32_TYPE,
2023 ARM_TYPE,
2024 DATA_TYPE
2025 };
2026
2027 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2028 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2029 is inserted in arm_build_one_stub(). */
2030 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2031 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2032 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2033 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2034 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2035 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2036
2037 typedef struct
2038 {
2039 bfd_vma data;
2040 enum stub_insn_type type;
2041 unsigned int r_type;
2042 int reloc_addend;
2043 } insn_sequence;
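/* Each of the *_INSN / DATA_WORD template macros above expands to one
   insn_sequence element; for instance DATA_WORD(0, R_ARM_ABS32, 0) becomes
   { 0, DATA_TYPE, R_ARM_ABS32, 0 }, i.e. a literal word to be fixed up with
   an R_ARM_ABS32 relocation and a zero addend.  */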
2044
2045 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2046 to reach the stub if necessary. */
2047 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2048 {
2049 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2050 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2051 };
2052
2053 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2054 available. */
2055 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2056 {
2057 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2058 ARM_INSN(0xe12fff1c), /* bx ip */
2059 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2060 };
2061
2062 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2063 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2064 {
2065 THUMB16_INSN(0xb401), /* push {r0} */
2066 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2067 THUMB16_INSN(0x4684), /* mov ip, r0 */
2068 THUMB16_INSN(0xbc01), /* pop {r0} */
2069 THUMB16_INSN(0x4760), /* bx ip */
2070 THUMB16_INSN(0xbf00), /* nop */
2071 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2072 };
2073
2074 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2075 allowed. */
2076 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2077 {
2078 THUMB16_INSN(0x4778), /* bx pc */
2079 THUMB16_INSN(0x46c0), /* nop */
2080 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2081 ARM_INSN(0xe12fff1c), /* bx ip */
2082 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2083 };
2084
2085 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2086 available. */
2087 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2088 {
2089 THUMB16_INSN(0x4778), /* bx pc */
2090 THUMB16_INSN(0x46c0), /* nop */
2091 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2092 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2093 };
2094
2095 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2096 one, when the destination is close enough. */
2097 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2098 {
2099 THUMB16_INSN(0x4778), /* bx pc */
2100 THUMB16_INSN(0x46c0), /* nop */
2101 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2102 };
2103
2104 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2105 blx to reach the stub if necessary. */
2106 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2107 {
2108 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2109 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2110 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2111 };
2112
2113 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2114 blx to reach the stub if necessary.  We cannot add into pc;
2115 it is not guaranteed to switch mode (the behaviour differs
2116 between ARMv6 and ARMv7).  */
2117 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2118 {
2119 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2120 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2121 ARM_INSN(0xe12fff1c), /* bx ip */
2122 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2123 };
2124
2125 /* V4T ARM -> Thumb long branch stub, PIC.  */
2126 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2127 {
2128 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2129 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2130 ARM_INSN(0xe12fff1c), /* bx ip */
2131 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2132 };
2133
2134 /* V4T Thumb -> ARM long branch stub, PIC. */
2135 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2136 {
2137 THUMB16_INSN(0x4778), /* bx pc */
2138 THUMB16_INSN(0x46c0), /* nop */
2139 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2140 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2141 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2142 };
2143
2144 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2145 architectures. */
2146 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2147 {
2148 THUMB16_INSN(0xb401), /* push {r0} */
2149 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2150 THUMB16_INSN(0x46fc), /* mov ip, pc */
2151 THUMB16_INSN(0x4484), /* add ip, r0 */
2152 THUMB16_INSN(0xbc01), /* pop {r0} */
2153 THUMB16_INSN(0x4760), /* bx ip */
2154 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2155 };
2156
2157 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2158 allowed. */
2159 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2160 {
2161 THUMB16_INSN(0x4778), /* bx pc */
2162 THUMB16_INSN(0x46c0), /* nop */
2163 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2164 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2165 ARM_INSN(0xe12fff1c), /* bx ip */
2166 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2167 };
2168
2169 /* Cortex-A8 erratum-workaround stubs. */
2170
2171 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2172 can't use a conditional branch to reach this stub). */
2173
2174 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2175 {
2176 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2177 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2178 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2179 };
2180
2181 /* Stub used for b.w and bl.w instructions. */
2182
2183 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2184 {
2185 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2186 };
2187
2188 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2189 {
2190 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2191 };
2192
2193 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2194 instruction (which switches to ARM mode) to point to this stub. Jump to the
2195 real destination using an ARM-mode branch. */
2196
2197 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2198 {
2199 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2200 };
2201
2202 /* Section name for stubs is the associated section name plus this
2203 string. */
2204 #define STUB_SUFFIX ".stub"
2205
2206 /* One entry per long/short branch stub defined above. */
2207 #define DEF_STUBS \
2208 DEF_STUB(long_branch_any_any) \
2209 DEF_STUB(long_branch_v4t_arm_thumb) \
2210 DEF_STUB(long_branch_thumb_only) \
2211 DEF_STUB(long_branch_v4t_thumb_thumb) \
2212 DEF_STUB(long_branch_v4t_thumb_arm) \
2213 DEF_STUB(short_branch_v4t_thumb_arm) \
2214 DEF_STUB(long_branch_any_arm_pic) \
2215 DEF_STUB(long_branch_any_thumb_pic) \
2216 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2219 DEF_STUB(long_branch_thumb_only_pic) \
2220 DEF_STUB(a8_veneer_b_cond) \
2221 DEF_STUB(a8_veneer_b) \
2222 DEF_STUB(a8_veneer_bl) \
2223 DEF_STUB(a8_veneer_blx)
2224
2225 #define DEF_STUB(x) arm_stub_##x,
2226 enum elf32_arm_stub_type {
2227 arm_stub_none,
2228 DEF_STUBS
2229 /* Note the first a8_veneer type.  */
2230 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template_sequence;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
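/* The DEF_STUBS X-macro is thus expanded twice above: once to declare the
   arm_stub_* enumerators and once to build stub_definitions.  For example,
   DEF_STUB(long_branch_any_any) contributes arm_stub_long_branch_any_any to
   the enum and { elf32_arm_stub_long_branch_any_any,
   ARRAY_SIZE (elf32_arm_stub_long_branch_any_any) } to the table, keeping the
   two in sync (note the extra leading arm_stub_none / {NULL, 0} entries).  */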
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
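/* The TYPE field records the state indicated by the ARM EABI mapping symbols:
   typically 'a' for ARM code, 't' for Thumb code and 'd' for data (from the
   $a, $t and $d symbols).  This is what allows code and data in the same
   section to be byte-swapped independently when producing a BE8 image.  */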
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so they may be created multiple times: whilst relaxing we keep an array of
2391 these entries, which is easy to refresh, then create a stub for each
2392 potentially erratum-triggering instruction once a solution is settled on.  */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 int st_type;
2403 };
2404
2405 /* A table of relocs applied to branches which might trigger Cortex-A8
2406 erratum. */
2407
2408 struct a8_erratum_reloc {
2409 bfd_vma from;
2410 bfd_vma destination;
2411 unsigned int r_type;
2412 unsigned char st_type;
2413 const char *sym_name;
2414 bfd_boolean non_a8_stub;
2415 };
2416
2417 /* The size of the thread control block. */
2418 #define TCB_SIZE 8
2419
2420 struct elf_arm_obj_tdata
2421 {
2422 struct elf_obj_tdata root;
2423
2424 /* tls_type for each local got entry. */
2425 char *local_got_tls_type;
2426
2427 /* Zero to warn when linking objects with incompatible enum sizes. */
2428 int no_enum_size_warning;
2429
2430 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2431 int no_wchar_size_warning;
2432 };
2433
2434 #define elf_arm_tdata(bfd) \
2435 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2436
2437 #define elf32_arm_local_got_tls_type(bfd) \
2438 (elf_arm_tdata (bfd)->local_got_tls_type)
2439
2440 #define is_arm_elf(bfd) \
2441 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2442 && elf_tdata (bfd) != NULL \
2443 && elf_object_id (bfd) == ARM_ELF_DATA)
2444
2445 static bfd_boolean
2446 elf32_arm_mkobject (bfd *abfd)
2447 {
2448 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2449 ARM_ELF_DATA);
2450 }
2451
2452 /* The ARM linker needs to keep track of the number of relocs that it
2453 decides to copy in check_relocs for each symbol. This is so that
2454 it can discard PC relative relocs if it doesn't need them when
2455 linking with -Bsymbolic. We store the information in a field
2456 extending the regular ELF linker hash table. */
2457
2458 /* This structure keeps track of the number of relocs we have copied
2459 for a given symbol. */
2460 struct elf32_arm_relocs_copied
2461 {
2462 /* Next section. */
2463 struct elf32_arm_relocs_copied * next;
2464 /* A section in dynobj. */
2465 asection * section;
2466 /* Number of relocs copied in this section. */
2467 bfd_size_type count;
2468 /* Number of PC-relative relocs copied in this section. */
2469 bfd_size_type pc_count;
2470 };
2471
2472 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2473
2474 /* Arm ELF linker hash entry. */
2475 struct elf32_arm_link_hash_entry
2476 {
2477 struct elf_link_hash_entry root;
2478
2479 /* Number of PC relative relocs copied for this symbol. */
2480 struct elf32_arm_relocs_copied * relocs_copied;
2481
2482 /* We reference count Thumb references to a PLT entry separately,
2483 so that we can emit the Thumb trampoline only if needed. */
2484 bfd_signed_vma plt_thumb_refcount;
2485
2486 /* Some references from Thumb code may be eliminated by BL->BLX
2487 conversion, so record them separately. */
2488 bfd_signed_vma plt_maybe_thumb_refcount;
2489
2490 /* Since PLT entries have variable size if the Thumb prologue is
2491 used, we need to record the index into .got.plt instead of
2492 recomputing it from the PLT offset. */
2493 bfd_signed_vma plt_got_offset;
2494
2495 #define GOT_UNKNOWN 0
2496 #define GOT_NORMAL 1
2497 #define GOT_TLS_GD 2
2498 #define GOT_TLS_IE 4
2499 unsigned char tls_type;
2500
2501 /* The symbol marking the real symbol location for exported thumb
2502 symbols with Arm stubs. */
2503 struct elf_link_hash_entry *export_glue;
2504
2505 /* A pointer to the most recently used stub hash entry against this
2506 symbol. */
2507 struct elf32_arm_stub_hash_entry *stub_cache;
2508 };
2509
2510 /* Traverse an arm ELF linker hash table. */
2511 #define elf32_arm_link_hash_traverse(table, func, info) \
2512 (elf_link_hash_traverse \
2513 (&(table)->root, \
2514 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2515 (info)))
2516
2517 /* Get the ARM elf linker hash table from a link_info structure. */
2518 #define elf32_arm_hash_table(info) \
2519 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2520 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2521
2522 #define arm_stub_hash_lookup(table, string, create, copy) \
2523 ((struct elf32_arm_stub_hash_entry *) \
2524 bfd_hash_lookup ((table), (string), (create), (copy)))
2525
2526 /* Array to keep track of which stub sections have been created, and
2527 information on stub grouping. */
2528 struct map_stub
2529 {
2530 /* This is the section to which stubs in the group will be
2531 attached. */
2532 asection *link_sec;
2533 /* The stub section. */
2534 asection *stub_sec;
2535 };
2536
2537 /* ARM ELF linker hash table. */
2538 struct elf32_arm_link_hash_table
2539 {
2540 /* The main hash table. */
2541 struct elf_link_hash_table root;
2542
2543 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2544 bfd_size_type thumb_glue_size;
2545
2546 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2547 bfd_size_type arm_glue_size;
2548
2549 /* The size in bytes of section containing the ARMv4 BX veneers. */
2550 bfd_size_type bx_glue_size;
2551
2552 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2553 veneer has been populated. */
2554 bfd_vma bx_glue_offset[15];
2555
2556 /* The size in bytes of the section containing glue for VFP11 erratum
2557 veneers. */
2558 bfd_size_type vfp11_erratum_glue_size;
2559
2560 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2561 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2562 elf32_arm_write_section(). */
2563 struct a8_erratum_fix *a8_erratum_fixes;
2564 unsigned int num_a8_erratum_fixes;
2565
2566 /* An arbitrary input BFD chosen to hold the glue sections. */
2567 bfd * bfd_of_glue_owner;
2568
2569 /* Nonzero to output a BE8 image. */
2570 int byteswap_code;
2571
2572 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2573 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2574 int target1_is_rel;
2575
2576 /* The relocation to use for R_ARM_TARGET2 relocations. */
2577 int target2_reloc;
2578
2579 /* 0 = Ignore R_ARM_V4BX.
2580 1 = Convert BX to MOV PC.
2581 2 = Generate v4 interworking stubs.  */
2582 int fix_v4bx;
2583
2584 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2585 int fix_cortex_a8;
2586
2587 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2588 int use_blx;
2589
2590 /* What sort of code sequences we should look for which may trigger the
2591 VFP11 denorm erratum. */
2592 bfd_arm_vfp11_fix vfp11_fix;
2593
2594 /* Global counter for the number of fixes we have emitted. */
2595 int num_vfp11_fixes;
2596
2597 /* Nonzero to force PIC branch veneers. */
2598 int pic_veneer;
2599
2600 /* The number of bytes in the initial entry in the PLT. */
2601 bfd_size_type plt_header_size;
2602
2603 /* The number of bytes in the subsequent PLT entries.  */
2604 bfd_size_type plt_entry_size;
2605
2606 /* True if the target system is VxWorks. */
2607 int vxworks_p;
2608
2609 /* True if the target system is Symbian OS. */
2610 int symbian_p;
2611
2612 /* True if the target uses REL relocations. */
2613 int use_rel;
2614
2615 /* Short-cuts to get to dynamic linker sections. */
2616 asection *sgot;
2617 asection *sgotplt;
2618 asection *srelgot;
2619 asection *splt;
2620 asection *srelplt;
2621 asection *sdynbss;
2622 asection *srelbss;
2623
2624 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2625 asection *srelplt2;
2626
2627 /* Data for R_ARM_TLS_LDM32 relocations. */
2628 union
2629 {
2630 bfd_signed_vma refcount;
2631 bfd_vma offset;
2632 } tls_ldm_got;
2633
2634 /* Small local sym cache. */
2635 struct sym_cache sym_cache;
2636
2637 /* For convenience in allocate_dynrelocs. */
2638 bfd * obfd;
2639
2640 /* The stub hash table. */
2641 struct bfd_hash_table stub_hash_table;
2642
2643 /* Linker stub bfd. */
2644 bfd *stub_bfd;
2645
2646 /* Linker call-backs. */
2647 asection * (*add_stub_section) (const char *, asection *);
2648 void (*layout_sections_again) (void);
2649
2650 /* Array to keep track of which stub sections have been created, and
2651 information on stub grouping. */
2652 struct map_stub *stub_group;
2653
2654 /* Number of elements in stub_group. */
2655 int top_id;
2656
2657 /* Assorted information used by elf32_arm_size_stubs. */
2658 unsigned int bfd_count;
2659 int top_index;
2660 asection **input_list;
2661 };
2662
2663 /* Create an entry in an ARM ELF linker hash table. */
2664
2665 static struct bfd_hash_entry *
2666 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2667 struct bfd_hash_table * table,
2668 const char * string)
2669 {
2670 struct elf32_arm_link_hash_entry * ret =
2671 (struct elf32_arm_link_hash_entry *) entry;
2672
2673 /* Allocate the structure if it has not already been allocated by a
2674 subclass. */
2675 if (ret == NULL)
2676 ret = (struct elf32_arm_link_hash_entry *)
2677 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2678 if (ret == NULL)
2679 return (struct bfd_hash_entry *) ret;
2680
2681 /* Call the allocation method of the superclass. */
2682 ret = ((struct elf32_arm_link_hash_entry *)
2683 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2684 table, string));
2685 if (ret != NULL)
2686 {
2687 ret->relocs_copied = NULL;
2688 ret->tls_type = GOT_UNKNOWN;
2689 ret->plt_thumb_refcount = 0;
2690 ret->plt_maybe_thumb_refcount = 0;
2691 ret->plt_got_offset = -1;
2692 ret->export_glue = NULL;
2693
2694 ret->stub_cache = NULL;
2695 }
2696
2697 return (struct bfd_hash_entry *) ret;
2698 }
2699
2700 /* Initialize an entry in the stub hash table. */
2701
2702 static struct bfd_hash_entry *
2703 stub_hash_newfunc (struct bfd_hash_entry *entry,
2704 struct bfd_hash_table *table,
2705 const char *string)
2706 {
2707 /* Allocate the structure if it has not already been allocated by a
2708 subclass. */
2709 if (entry == NULL)
2710 {
2711 entry = (struct bfd_hash_entry *)
2712 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2713 if (entry == NULL)
2714 return entry;
2715 }
2716
2717 /* Call the allocation method of the superclass. */
2718 entry = bfd_hash_newfunc (entry, table, string);
2719 if (entry != NULL)
2720 {
2721 struct elf32_arm_stub_hash_entry *eh;
2722
2723 /* Initialize the local fields. */
2724 eh = (struct elf32_arm_stub_hash_entry *) entry;
2725 eh->stub_sec = NULL;
2726 eh->stub_offset = 0;
2727 eh->target_value = 0;
2728 eh->target_section = NULL;
2729 eh->target_addend = 0;
2730 eh->orig_insn = 0;
2731 eh->stub_type = arm_stub_none;
2732 eh->stub_size = 0;
2733 eh->stub_template = NULL;
2734 eh->stub_template_size = 0;
2735 eh->h = NULL;
2736 eh->id_sec = NULL;
2737 eh->output_name = NULL;
2738 }
2739
2740 return entry;
2741 }
2742
2743 /* Create .got, .got.plt, and .rel(a).got sections in DYNOBJ, and set up
2744 shortcuts to them in our hash table. */
2745
2746 static bfd_boolean
2747 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2748 {
2749 struct elf32_arm_link_hash_table *htab;
2750
2751 htab = elf32_arm_hash_table (info);
2752 if (htab == NULL)
2753 return FALSE;
2754
2755 /* BPABI objects never have a GOT, or associated sections. */
2756 if (htab->symbian_p)
2757 return TRUE;
2758
2759 if (! _bfd_elf_create_got_section (dynobj, info))
2760 return FALSE;
2761
2762 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2763 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2764 if (!htab->sgot || !htab->sgotplt)
2765 abort ();
2766
2767 htab->srelgot = bfd_get_section_by_name (dynobj,
2768 RELOC_SECTION (htab, ".got"));
2769 if (htab->srelgot == NULL)
2770 return FALSE;
2771 return TRUE;
2772 }
2773
2774 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2775 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2776 hash table. */
2777
2778 static bfd_boolean
2779 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2780 {
2781 struct elf32_arm_link_hash_table *htab;
2782
2783 htab = elf32_arm_hash_table (info);
2784 if (htab == NULL)
2785 return FALSE;
2786
2787 if (!htab->sgot && !create_got_section (dynobj, info))
2788 return FALSE;
2789
2790 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2791 return FALSE;
2792
2793 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2794 htab->srelplt = bfd_get_section_by_name (dynobj,
2795 RELOC_SECTION (htab, ".plt"));
2796 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2797 if (!info->shared)
2798 htab->srelbss = bfd_get_section_by_name (dynobj,
2799 RELOC_SECTION (htab, ".bss"));
2800
2801 if (htab->vxworks_p)
2802 {
2803 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2804 return FALSE;
2805
2806 if (info->shared)
2807 {
2808 htab->plt_header_size = 0;
2809 htab->plt_entry_size
2810 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2811 }
2812 else
2813 {
2814 htab->plt_header_size
2815 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2816 htab->plt_entry_size
2817 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2818 }
2819 }
2820
2821 if (!htab->splt
2822 || !htab->srelplt
2823 || !htab->sdynbss
2824 || (!info->shared && !htab->srelbss))
2825 abort ();
2826
2827 return TRUE;
2828 }
2829
2830 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2831
2832 static void
2833 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2834 struct elf_link_hash_entry *dir,
2835 struct elf_link_hash_entry *ind)
2836 {
2837 struct elf32_arm_link_hash_entry *edir, *eind;
2838
2839 edir = (struct elf32_arm_link_hash_entry *) dir;
2840 eind = (struct elf32_arm_link_hash_entry *) ind;
2841
2842 if (eind->relocs_copied != NULL)
2843 {
2844 if (edir->relocs_copied != NULL)
2845 {
2846 struct elf32_arm_relocs_copied **pp;
2847 struct elf32_arm_relocs_copied *p;
2848
2849 /* Add reloc counts against the indirect sym to the direct sym
2850 list. Merge any entries against the same section. */
2851 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2852 {
2853 struct elf32_arm_relocs_copied *q;
2854
2855 for (q = edir->relocs_copied; q != NULL; q = q->next)
2856 if (q->section == p->section)
2857 {
2858 q->pc_count += p->pc_count;
2859 q->count += p->count;
2860 *pp = p->next;
2861 break;
2862 }
2863 if (q == NULL)
2864 pp = &p->next;
2865 }
2866 *pp = edir->relocs_copied;
2867 }
2868
2869 edir->relocs_copied = eind->relocs_copied;
2870 eind->relocs_copied = NULL;
2871 }
2872
2873 if (ind->root.type == bfd_link_hash_indirect)
2874 {
2875 /* Copy over PLT info. */
2876 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2877 eind->plt_thumb_refcount = 0;
2878 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2879 eind->plt_maybe_thumb_refcount = 0;
2880
2881 if (dir->got.refcount <= 0)
2882 {
2883 edir->tls_type = eind->tls_type;
2884 eind->tls_type = GOT_UNKNOWN;
2885 }
2886 }
2887
2888 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2889 }
2890
2891 /* Create an ARM elf linker hash table. */
2892
2893 static struct bfd_link_hash_table *
2894 elf32_arm_link_hash_table_create (bfd *abfd)
2895 {
2896 struct elf32_arm_link_hash_table *ret;
2897 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2898
2899 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2900 if (ret == NULL)
2901 return NULL;
2902
2903 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2904 elf32_arm_link_hash_newfunc,
2905 sizeof (struct elf32_arm_link_hash_entry),
2906 ARM_ELF_DATA))
2907 {
2908 free (ret);
2909 return NULL;
2910 }
2911
2912 ret->sgot = NULL;
2913 ret->sgotplt = NULL;
2914 ret->srelgot = NULL;
2915 ret->splt = NULL;
2916 ret->srelplt = NULL;
2917 ret->sdynbss = NULL;
2918 ret->srelbss = NULL;
2919 ret->srelplt2 = NULL;
2920 ret->thumb_glue_size = 0;
2921 ret->arm_glue_size = 0;
2922 ret->bx_glue_size = 0;
2923 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2924 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2925 ret->vfp11_erratum_glue_size = 0;
2926 ret->num_vfp11_fixes = 0;
2927 ret->fix_cortex_a8 = 0;
2928 ret->bfd_of_glue_owner = NULL;
2929 ret->byteswap_code = 0;
2930 ret->target1_is_rel = 0;
2931 ret->target2_reloc = R_ARM_NONE;
2932 #ifdef FOUR_WORD_PLT
2933 ret->plt_header_size = 16;
2934 ret->plt_entry_size = 16;
2935 #else
2936 ret->plt_header_size = 20;
2937 ret->plt_entry_size = 12;
2938 #endif
2939 ret->fix_v4bx = 0;
2940 ret->use_blx = 0;
2941 ret->vxworks_p = 0;
2942 ret->symbian_p = 0;
2943 ret->use_rel = 1;
2944 ret->sym_cache.abfd = NULL;
2945 ret->obfd = abfd;
2946 ret->tls_ldm_got.refcount = 0;
2947 ret->stub_bfd = NULL;
2948 ret->add_stub_section = NULL;
2949 ret->layout_sections_again = NULL;
2950 ret->stub_group = NULL;
2951 ret->top_id = 0;
2952 ret->bfd_count = 0;
2953 ret->top_index = 0;
2954 ret->input_list = NULL;
2955
2956 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2957 sizeof (struct elf32_arm_stub_hash_entry)))
2958 {
2959 free (ret);
2960 return NULL;
2961 }
2962
2963 return &ret->root.root;
2964 }
2965
2966 /* Free the derived linker hash table. */
2967
2968 static void
2969 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2970 {
2971 struct elf32_arm_link_hash_table *ret
2972 = (struct elf32_arm_link_hash_table *) hash;
2973
2974 bfd_hash_table_free (&ret->stub_hash_table);
2975 _bfd_generic_link_hash_table_free (hash);
2976 }
2977
2978 /* Determine if we're dealing with a Thumb only architecture. */
2979
2980 static bfd_boolean
2981 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2982 {
2983 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2984 Tag_CPU_arch);
2985 int profile;
2986
2987 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2988 return FALSE;
2989
2990 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2991 Tag_CPU_arch_profile);
2992
2993 return profile == 'M';
2994 }
2995
2996 /* Determine if we're dealing with a Thumb-2 object. */
2997
2998 static bfd_boolean
2999 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3000 {
3001 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3002 Tag_CPU_arch);
3003 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3004 }
3005
3006 /* Determine what kind of NOPs are available. */
3007
3008 static bfd_boolean
3009 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3010 {
3011 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3012 Tag_CPU_arch);
3013 return arch == TAG_CPU_ARCH_V6T2
3014 || arch == TAG_CPU_ARCH_V6K
3015 || arch == TAG_CPU_ARCH_V7
3016 || arch == TAG_CPU_ARCH_V7E_M;
3017 }
3018
3019 static bfd_boolean
3020 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3021 {
3022 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3023 Tag_CPU_arch);
3024 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3025 || arch == TAG_CPU_ARCH_V7E_M);
3026 }
3027
3028 static bfd_boolean
3029 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3030 {
3031 switch (stub_type)
3032 {
3033 case arm_stub_long_branch_thumb_only:
3034 case arm_stub_long_branch_v4t_thumb_arm:
3035 case arm_stub_short_branch_v4t_thumb_arm:
3036 case arm_stub_long_branch_v4t_thumb_arm_pic:
3037 case arm_stub_long_branch_thumb_only_pic:
3038 return TRUE;
3039 case arm_stub_none:
3040 BFD_FAIL ();
3041 return FALSE;
3043 default:
3044 return FALSE;
3045 }
3046 }
3047
3048 /* Determine the type of stub needed, if any, for a call. */
3049
3050 static enum elf32_arm_stub_type
3051 arm_type_of_stub (struct bfd_link_info *info,
3052 asection *input_sec,
3053 const Elf_Internal_Rela *rel,
3054 int *actual_st_type,
3055 struct elf32_arm_link_hash_entry *hash,
3056 bfd_vma destination,
3057 asection *sym_sec,
3058 bfd *input_bfd,
3059 const char *name)
3060 {
3061 bfd_vma location;
3062 bfd_signed_vma branch_offset;
3063 unsigned int r_type;
3064 struct elf32_arm_link_hash_table * globals;
3065 int thumb2;
3066 int thumb_only;
3067 enum elf32_arm_stub_type stub_type = arm_stub_none;
3068 int use_plt = 0;
3069 int st_type = *actual_st_type;
3070
3071 /* We don't know the actual type of destination in case it is of
3072 type STT_SECTION: give up. */
3073 if (st_type == STT_SECTION)
3074 return stub_type;
3075
3076 globals = elf32_arm_hash_table (info);
3077 if (globals == NULL)
3078 return stub_type;
3079
3080 thumb_only = using_thumb_only (globals);
3081
3082 thumb2 = using_thumb2 (globals);
3083
3084 /* Determine where the call point is. */
3085 location = (input_sec->output_offset
3086 + input_sec->output_section->vma
3087 + rel->r_offset);
3088
3089 r_type = ELF32_R_TYPE (rel->r_info);
3090
3091 /* Keep a simpler condition, for the sake of clarity. */
3092 if (globals->splt != NULL
3093 && hash != NULL
3094 && hash->root.plt.offset != (bfd_vma) -1)
3095 {
3096 use_plt = 1;
3097
3098 /* Note when dealing with PLT entries: the main PLT stub is in
3099 ARM mode, so if the branch is in Thumb mode, another
3100 Thumb->ARM stub will be inserted later just before the ARM
3101 PLT stub. We don't take this extra distance into account
3102 here, because if a long branch stub is needed, we'll add a
3103 Thumb->Arm one and branch directly to the ARM PLT entry
3104 because it avoids spreading offset corrections in several
3105 places. */
3106
3107 destination = (globals->splt->output_section->vma
3108 + globals->splt->output_offset
3109 + hash->root.plt.offset);
3110 st_type = STT_FUNC;
3111 }
3112
3113 branch_offset = (bfd_signed_vma)(destination - location);
3114
3115 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3116 {
3117 /* Handle cases where:
3118 - this call goes too far (different Thumb/Thumb2 max
3119 distance)
3120 - it's a Thumb->Arm call and blx is not available, or it's a
3121 Thumb->Arm branch (not bl). A stub is needed in this case,
3122 but only if this call is not through a PLT entry. Indeed,
3123 PLT stubs handle mode switching already.
3124 */
3125 if ((!thumb2
3126 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3127 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3128 || (thumb2
3129 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3130 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3131 || ((st_type != STT_ARM_TFUNC)
3132 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3133 || (r_type == R_ARM_THM_JUMP24))
3134 && !use_plt))
3135 {
3136 if (st_type == STT_ARM_TFUNC)
3137 {
3138 /* Thumb to thumb. */
3139 if (!thumb_only)
3140 {
3141 stub_type = (info->shared | globals->pic_veneer)
3142 /* PIC stubs. */
3143 ? ((globals->use_blx
3144 && (r_type == R_ARM_THM_CALL))
3145 /* V5T and above. Stub starts with ARM code, so
3146 we must be able to switch mode before
3147 reaching it, which is only possible for 'bl'
3148 (ie R_ARM_THM_CALL relocation). */
3149 ? arm_stub_long_branch_any_thumb_pic
3150 /* On V4T, use Thumb code only. */
3151 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3152
3153 /* non-PIC stubs. */
3154 : ((globals->use_blx
3155 && (r_type == R_ARM_THM_CALL))
3156 /* V5T and above. */
3157 ? arm_stub_long_branch_any_any
3158 /* V4T. */
3159 : arm_stub_long_branch_v4t_thumb_thumb);
3160 }
3161 else
3162 {
3163 stub_type = (info->shared | globals->pic_veneer)
3164 /* PIC stub. */
3165 ? arm_stub_long_branch_thumb_only_pic
3166 /* non-PIC stub. */
3167 : arm_stub_long_branch_thumb_only;
3168 }
3169 }
3170 else
3171 {
3172 /* Thumb to arm. */
3173 if (sym_sec != NULL
3174 && sym_sec->owner != NULL
3175 && !INTERWORK_FLAG (sym_sec->owner))
3176 {
3177 (*_bfd_error_handler)
3178 (_("%B(%s): warning: interworking not enabled.\n"
3179 " first occurrence: %B: Thumb call to ARM"),
3180 sym_sec->owner, input_bfd, name);
3181 }
3182
3183 stub_type = (info->shared | globals->pic_veneer)
3184 /* PIC stubs. */
3185 ? ((globals->use_blx
3186 && (r_type == R_ARM_THM_CALL))
3187 /* V5T and above. */
3188 ? arm_stub_long_branch_any_arm_pic
3189 /* V4T PIC stub. */
3190 : arm_stub_long_branch_v4t_thumb_arm_pic)
3191
3192 /* non-PIC stubs. */
3193 : ((globals->use_blx
3194 && (r_type == R_ARM_THM_CALL))
3195 /* V5T and above. */
3196 ? arm_stub_long_branch_any_any
3197 /* V4T. */
3198 : arm_stub_long_branch_v4t_thumb_arm);
3199
3200 /* Handle v4t short branches. */
3201 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3202 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3203 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3204 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3205 }
3206 }
3207 }
3208 else if (r_type == R_ARM_CALL
3209 || r_type == R_ARM_JUMP24
3210 || r_type == R_ARM_PLT32)
3211 {
3212 if (st_type == STT_ARM_TFUNC)
3213 {
3214 /* Arm to thumb. */
3215
3216 if (sym_sec != NULL
3217 && sym_sec->owner != NULL
3218 && !INTERWORK_FLAG (sym_sec->owner))
3219 {
3220 (*_bfd_error_handler)
3221 (_("%B(%s): warning: interworking not enabled.\n"
3222 " first occurrence: %B: ARM call to Thumb"),
3223 sym_sec->owner, input_bfd, name);
3224 }
3225
3226 /* We have an extra 2 bytes of reach because of
3227 the mode change (bit 24 (H) of the BLX encoding).  */
3228 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3229 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3230 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3231 || (r_type == R_ARM_JUMP24)
3232 || (r_type == R_ARM_PLT32))
3233 {
3234 stub_type = (info->shared | globals->pic_veneer)
3235 /* PIC stubs. */
3236 ? ((globals->use_blx)
3237 /* V5T and above. */
3238 ? arm_stub_long_branch_any_thumb_pic
3239 /* V4T stub. */
3240 : arm_stub_long_branch_v4t_arm_thumb_pic)
3241
3242 /* non-PIC stubs. */
3243 : ((globals->use_blx)
3244 /* V5T and above. */
3245 ? arm_stub_long_branch_any_any
3246 /* V4T. */
3247 : arm_stub_long_branch_v4t_arm_thumb);
3248 }
3249 }
3250 else
3251 {
3252 /* Arm to arm. */
3253 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3254 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3255 {
3256 stub_type = (info->shared | globals->pic_veneer)
3257 /* PIC stubs. */
3258 ? arm_stub_long_branch_any_arm_pic
3259 /* non-PIC stubs. */
3260 : arm_stub_long_branch_any_any;
3261 }
3262 }
3263 }
3264
3265 /* If a stub is needed, record the actual destination type. */
3266 if (stub_type != arm_stub_none)
3267 {
3268 *actual_st_type = st_type;
3269 }
3270
3271 return stub_type;
3272 }
3273
3274 /* Build a name for an entry in the stub hash table. */
3275
3276 static char *
3277 elf32_arm_stub_name (const asection *input_section,
3278 const asection *sym_sec,
3279 const struct elf32_arm_link_hash_entry *hash,
3280 const Elf_Internal_Rela *rel,
3281 enum elf32_arm_stub_type stub_type)
3282 {
3283 char *stub_name;
3284 bfd_size_type len;
3285
3286 if (hash)
3287 {
3288 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3289 stub_name = (char *) bfd_malloc (len);
3290 if (stub_name != NULL)
3291 sprintf (stub_name, "%08x_%s+%x_%d",
3292 input_section->id & 0xffffffff,
3293 hash->root.root.root.string,
3294 (int) rel->r_addend & 0xffffffff,
3295 (int) stub_type);
3296 }
3297 else
3298 {
3299 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3300 stub_name = (char *) bfd_malloc (len);
3301 if (stub_name != NULL)
3302 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3303 input_section->id & 0xffffffff,
3304 sym_sec->id & 0xffffffff,
3305 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3306 (int) rel->r_addend & 0xffffffff,
3307 (int) stub_type);
3308 }
3309
3310 return stub_name;
3311 }
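/* So, for example, a stub for a call to a global symbol "bar" from section
   id 0x12 would be named something like "00000012_bar+0_7", while a stub for
   a local symbol is keyed on the defining section id and symbol index instead
   ("00000012_34:56+0_7"); the trailing number is the stub type.  */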
3312
3313 /* Look up an entry in the stub hash. Stub entries are cached because
3314 creating the stub name takes a bit of time. */
3315
3316 static struct elf32_arm_stub_hash_entry *
3317 elf32_arm_get_stub_entry (const asection *input_section,
3318 const asection *sym_sec,
3319 struct elf_link_hash_entry *hash,
3320 const Elf_Internal_Rela *rel,
3321 struct elf32_arm_link_hash_table *htab,
3322 enum elf32_arm_stub_type stub_type)
3323 {
3324 struct elf32_arm_stub_hash_entry *stub_entry;
3325 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3326 const asection *id_sec;
3327
3328 if ((input_section->flags & SEC_CODE) == 0)
3329 return NULL;
3330
3331 /* If this input section is part of a group of sections sharing one
3332 stub section, then use the id of the first section in the group.
3333 Stub names need to include a section id, as there may well be
3334 more than one stub used to reach say, printf, and we need to
3335 distinguish between them. */
3336 id_sec = htab->stub_group[input_section->id].link_sec;
3337
3338 if (h != NULL && h->stub_cache != NULL
3339 && h->stub_cache->h == h
3340 && h->stub_cache->id_sec == id_sec
3341 && h->stub_cache->stub_type == stub_type)
3342 {
3343 stub_entry = h->stub_cache;
3344 }
3345 else
3346 {
3347 char *stub_name;
3348
3349 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3350 if (stub_name == NULL)
3351 return NULL;
3352
3353 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3354 stub_name, FALSE, FALSE);
3355 if (h != NULL)
3356 h->stub_cache = stub_entry;
3357
3358 free (stub_name);
3359 }
3360
3361 return stub_entry;
3362 }
3363
3364 /* Find or create a stub section. Returns a pointer to the stub section, and
3365 the section to which the stub section will be attached (in *LINK_SEC_P).
3366 LINK_SEC_P may be NULL. */
3367
3368 static asection *
3369 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3370 struct elf32_arm_link_hash_table *htab)
3371 {
3372 asection *link_sec;
3373 asection *stub_sec;
3374
3375 link_sec = htab->stub_group[section->id].link_sec;
3376 stub_sec = htab->stub_group[section->id].stub_sec;
3377 if (stub_sec == NULL)
3378 {
3379 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3380 if (stub_sec == NULL)
3381 {
3382 size_t namelen;
3383 bfd_size_type len;
3384 char *s_name;
3385
3386 namelen = strlen (link_sec->name);
3387 len = namelen + sizeof (STUB_SUFFIX);
3388 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3389 if (s_name == NULL)
3390 return NULL;
3391
3392 memcpy (s_name, link_sec->name, namelen);
3393 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3394 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3395 if (stub_sec == NULL)
3396 return NULL;
3397 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3398 }
3399 htab->stub_group[section->id].stub_sec = stub_sec;
3400 }
3401
3402 if (link_sec_p)
3403 *link_sec_p = link_sec;
3404
3405 return stub_sec;
3406 }
3407
3408 /* Add a new stub entry to the stub hash. Not all fields of the new
3409 stub entry are initialised. */
3410
3411 static struct elf32_arm_stub_hash_entry *
3412 elf32_arm_add_stub (const char *stub_name,
3413 asection *section,
3414 struct elf32_arm_link_hash_table *htab)
3415 {
3416 asection *link_sec;
3417 asection *stub_sec;
3418 struct elf32_arm_stub_hash_entry *stub_entry;
3419
3420 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3421 if (stub_sec == NULL)
3422 return NULL;
3423
3424 /* Enter this entry into the linker stub hash table. */
3425 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3426 TRUE, FALSE);
3427 if (stub_entry == NULL)
3428 {
3429 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3430 section->owner,
3431 stub_name);
3432 return NULL;
3433 }
3434
3435 stub_entry->stub_sec = stub_sec;
3436 stub_entry->stub_offset = 0;
3437 stub_entry->id_sec = link_sec;
3438
3439 return stub_entry;
3440 }
3441
3442 /* Store an Arm insn into an output section not processed by
3443 elf32_arm_write_section. */
3444
3445 static void
3446 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3447 bfd * output_bfd, bfd_vma val, void * ptr)
3448 {
3449 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3450 bfd_putl32 (val, ptr);
3451 else
3452 bfd_putb32 (val, ptr);
3453 }
3454
3455 /* Store a 16-bit Thumb insn into an output section not processed by
3456 elf32_arm_write_section. */
3457
3458 static void
3459 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3460 bfd * output_bfd, bfd_vma val, void * ptr)
3461 {
3462 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3463 bfd_putl16 (val, ptr);
3464 else
3465 bfd_putb16 (val, ptr);
3466 }
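/* Note: byteswap_code is set when producing a BE8 image, in which
   instructions are stored little-endian even though data remains big-endian;
   hence the two helpers above compare it against the output BFD's data
   endianness to decide which byte order to use.  */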
3467
3468 static bfd_reloc_status_type elf32_arm_final_link_relocate
3469 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3470 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3471 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3472
3473 static bfd_boolean
3474 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3475 void * in_arg)
3476 {
3477 #define MAXRELOCS 2
3478 struct elf32_arm_stub_hash_entry *stub_entry;
3479 struct elf32_arm_link_hash_table *globals;
3480 struct bfd_link_info *info;
3481 asection *stub_sec;
3482 bfd *stub_bfd;
3483 bfd_vma stub_addr;
3484 bfd_byte *loc;
3485 bfd_vma sym_value;
3486 int template_size;
3487 int size;
3488 const insn_sequence *template_sequence;
3489 int i;
3490 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3491 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3492 int nrelocs = 0;
3493
3494 /* Massage our args to the form they really have. */
3495 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3496 info = (struct bfd_link_info *) in_arg;
3497
3498 globals = elf32_arm_hash_table (info);
3499 if (globals == NULL)
3500 return FALSE;
3501
3502 stub_sec = stub_entry->stub_sec;
3503
3504 if ((globals->fix_cortex_a8 < 0)
3505 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3506 /* We have to do the a8 fixes last, as they are less aligned than
3507 the other veneers. */
3508 return TRUE;
3509
3510 /* Make a note of the offset within the stubs for this entry. */
3511 stub_entry->stub_offset = stub_sec->size;
3512 loc = stub_sec->contents + stub_entry->stub_offset;
3513
3514 stub_bfd = stub_sec->owner;
3515
3516 /* This is the address of the start of the stub. */
3517 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3518 + stub_entry->stub_offset;
3519
3520 /* This is the address of the stub destination. */
3521 sym_value = (stub_entry->target_value
3522 + stub_entry->target_section->output_offset
3523 + stub_entry->target_section->output_section->vma);
3524
3525 template_sequence = stub_entry->stub_template;
3526 template_size = stub_entry->stub_template_size;
3527
3528 size = 0;
3529 for (i = 0; i < template_size; i++)
3530 {
3531 switch (template_sequence[i].type)
3532 {
3533 case THUMB16_TYPE:
3534 {
3535 bfd_vma data = (bfd_vma) template_sequence[i].data;
3536 if (template_sequence[i].reloc_addend != 0)
3537 {
3538 /* We've borrowed the reloc_addend field to mean we should
3539 insert a condition code into this (Thumb-1 branch)
3540 instruction. See THUMB16_BCOND_INSN. */
3541 BFD_ASSERT ((data & 0xff00) == 0xd000);
3542 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3543 }
3544 bfd_put_16 (stub_bfd, data, loc + size);
3545 size += 2;
3546 }
3547 break;
3548
3549 case THUMB32_TYPE:
3550 bfd_put_16 (stub_bfd,
3551 (template_sequence[i].data >> 16) & 0xffff,
3552 loc + size);
3553 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
3554 loc + size + 2);
3555 if (template_sequence[i].r_type != R_ARM_NONE)
3556 {
3557 stub_reloc_idx[nrelocs] = i;
3558 stub_reloc_offset[nrelocs++] = size;
3559 }
3560 size += 4;
3561 break;
3562
3563 case ARM_TYPE:
3564 bfd_put_32 (stub_bfd, template_sequence[i].data,
3565 loc + size);
3566 /* Handle cases where the target is encoded within the
3567 instruction. */
3568 if (template_sequence[i].r_type == R_ARM_JUMP24)
3569 {
3570 stub_reloc_idx[nrelocs] = i;
3571 stub_reloc_offset[nrelocs++] = size;
3572 }
3573 size += 4;
3574 break;
3575
3576 case DATA_TYPE:
3577 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3578 stub_reloc_idx[nrelocs] = i;
3579 stub_reloc_offset[nrelocs++] = size;
3580 size += 4;
3581 break;
3582
3583 default:
3584 BFD_FAIL ();
3585 return FALSE;
3586 }
3587 }
3588
3589 stub_sec->size += size;
3590
3591 /* Stub size has already been computed in arm_size_one_stub. Check
3592 consistency. */
3593 BFD_ASSERT (size == stub_entry->stub_size);
3594
3595 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3596 if (stub_entry->st_type == STT_ARM_TFUNC)
3597 sym_value |= 1;
3598
3599 /* Assume there are between one and MAXRELOCS entries to relocate
3600 in each stub. */
3601 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3602
3603 for (i = 0; i < nrelocs; i++)
3604 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3605 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3606 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3607 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3608 {
3609 Elf_Internal_Rela rel;
3610 bfd_boolean unresolved_reloc;
3611 char *error_message;
3612 int sym_flags
3613 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3614 ? STT_ARM_TFUNC : 0;
3615 bfd_vma points_to = sym_value + stub_entry->target_addend;
3616
3617 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3618 rel.r_info = ELF32_R_INFO (0,
3619 template_sequence[stub_reloc_idx[i]].r_type);
3620 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3621
3622 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3623 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3624 template should refer back to the instruction after the original
3625 branch. */
3626 points_to = sym_value;
3627
3628 /* There may be unintended consequences if this is not true. */
3629 BFD_ASSERT (stub_entry->h == NULL);
3630
3631 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3632 properly. We should probably use this function unconditionally,
3633 rather than only for certain relocations listed in the enclosing
3634 conditional, for the sake of consistency. */
3635 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3636 (template_sequence[stub_reloc_idx[i]].r_type),
3637 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3638 points_to, info, stub_entry->target_section, "", sym_flags,
3639 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3640 &error_message);
3641 }
3642 else
3643 {
3644 Elf_Internal_Rela rel;
3645 bfd_boolean unresolved_reloc;
3646 char *error_message;
3647 bfd_vma points_to = sym_value + stub_entry->target_addend
3648 + template_sequence[stub_reloc_idx[i]].reloc_addend;
3649
3650 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3651 rel.r_info = ELF32_R_INFO (0,
3652 template_sequence[stub_reloc_idx[i]].r_type);
3653 rel.r_addend = 0;
3654
3655 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3656 (template_sequence[stub_reloc_idx[i]].r_type),
3657 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3658 points_to, info, stub_entry->target_section, "", stub_entry->st_type,
3659 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3660 &error_message);
3661 }
3662
3663 return TRUE;
3664 #undef MAXRELOCS
3665 }
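
/* For reference, the stub templates consumed above are arrays of
   insn_sequence entries.  As a rough sketch only (the authoritative
   definitions are the stub_definitions tables earlier in this file,
   whose ARM_INSN/DATA_WORD-style constructors are assumed here), the
   simplest ARM-to-ARM long-branch stub looks like:

     static const insn_sequence example_long_branch_any_any[] =
     {
       ARM_INSN (0xe51ff004),          @ ldr pc, [pc, #-4]
       DATA_WORD (0, R_ARM_ABS32, 0),  @ .word <destination>
     };

   arm_build_one_stub copies the ARM_TYPE word verbatim and then
   resolves the DATA_TYPE word through elf32_arm_final_link_relocate
   using the R_ARM_ABS32 relocation recorded in the template.  */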
3666
3667 /* Calculate the template, template size and instruction size for a stub.
3668 Return value is the instruction size. */
3669
3670 static unsigned int
3671 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3672 const insn_sequence **stub_template,
3673 int *stub_template_size)
3674 {
3675 const insn_sequence *template_sequence = NULL;
3676 int template_size = 0, i;
3677 unsigned int size;
3678
3679 template_sequence = stub_definitions[stub_type].template_sequence;
3680 template_size = stub_definitions[stub_type].template_size;
3681
3682 size = 0;
3683 for (i = 0; i < template_size; i++)
3684 {
3685 switch (template_sequence[i].type)
3686 {
3687 case THUMB16_TYPE:
3688 size += 2;
3689 break;
3690
3691 case ARM_TYPE:
3692 case THUMB32_TYPE:
3693 case DATA_TYPE:
3694 size += 4;
3695 break;
3696
3697 default:
3698 BFD_FAIL ();
3699 return 0;
3700 }
3701 }
3702
3703 if (stub_template)
3704 *stub_template = template_sequence;
3705
3706 if (stub_template_size)
3707 *stub_template_size = template_size;
3708
3709 return size;
3710 }
3711
3712 /* As above, but don't actually build the stub. Just bump offset so
3713 we know stub section sizes. */
3714
3715 static bfd_boolean
3716 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3717 void * in_arg)
3718 {
3719 struct elf32_arm_stub_hash_entry *stub_entry;
3720 struct elf32_arm_link_hash_table *htab;
3721 const insn_sequence *template_sequence;
3722 int template_size, size;
3723
3724 /* Massage our args to the form they really have. */
3725 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3726 htab = (struct elf32_arm_link_hash_table *) in_arg;
3727
3728 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3729 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3730
3731 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3732 &template_size);
3733
3734 stub_entry->stub_size = size;
3735 stub_entry->stub_template = template_sequence;
3736 stub_entry->stub_template_size = template_size;
3737
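/* Reserve the stub's space rounded up to a multiple of 8 bytes.  */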
3738 size = (size + 7) & ~7;
3739 stub_entry->stub_sec->size += size;
3740
3741 return TRUE;
3742 }
3743
3744 /* External entry points for sizing and building linker stubs. */
3745
3746 /* Set up various things so that we can make a list of input sections
3747 for each output section included in the link. Returns -1 on error,
3748 0 when no stubs will be needed, and 1 on success. */
3749
3750 int
3751 elf32_arm_setup_section_lists (bfd *output_bfd,
3752 struct bfd_link_info *info)
3753 {
3754 bfd *input_bfd;
3755 unsigned int bfd_count;
3756 int top_id, top_index;
3757 asection *section;
3758 asection **input_list, **list;
3759 bfd_size_type amt;
3760 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3761
3762 if (htab == NULL)
3763 return 0;
3764 if (! is_elf_hash_table (htab))
3765 return 0;
3766
3767 /* Count the number of input BFDs and find the top input section id. */
3768 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3769 input_bfd != NULL;
3770 input_bfd = input_bfd->link_next)
3771 {
3772 bfd_count += 1;
3773 for (section = input_bfd->sections;
3774 section != NULL;
3775 section = section->next)
3776 {
3777 if (top_id < section->id)
3778 top_id = section->id;
3779 }
3780 }
3781 htab->bfd_count = bfd_count;
3782
3783 amt = sizeof (struct map_stub) * (top_id + 1);
3784 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3785 if (htab->stub_group == NULL)
3786 return -1;
3787 htab->top_id = top_id;
3788
3789 /* We can't use output_bfd->section_count here to find the top output
3790 section index as some sections may have been removed, and
3791 _bfd_strip_section_from_output doesn't renumber the indices. */
3792 for (section = output_bfd->sections, top_index = 0;
3793 section != NULL;
3794 section = section->next)
3795 {
3796 if (top_index < section->index)
3797 top_index = section->index;
3798 }
3799
3800 htab->top_index = top_index;
3801 amt = sizeof (asection *) * (top_index + 1);
3802 input_list = (asection **) bfd_malloc (amt);
3803 htab->input_list = input_list;
3804 if (input_list == NULL)
3805 return -1;
3806
3807 /* For sections we aren't interested in, mark their entries with a
3808 value we can check later. */
3809 list = input_list + top_index;
3810 do
3811 *list = bfd_abs_section_ptr;
3812 while (list-- != input_list);
3813
3814 for (section = output_bfd->sections;
3815 section != NULL;
3816 section = section->next)
3817 {
3818 if ((section->flags & SEC_CODE) != 0)
3819 input_list[section->index] = NULL;
3820 }
3821
3822 return 1;
3823 }
3824
3825 /* The linker repeatedly calls this function for each input section,
3826 in the order that input sections are linked into output sections.
3827 Build lists of input sections to determine groupings between which
3828 we may insert linker stubs. */
3829
3830 void
3831 elf32_arm_next_input_section (struct bfd_link_info *info,
3832 asection *isec)
3833 {
3834 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3835
3836 if (htab == NULL)
3837 return;
3838
3839 if (isec->output_section->index <= htab->top_index)
3840 {
3841 asection **list = htab->input_list + isec->output_section->index;
3842
3843 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3844 {
3845 /* Steal the link_sec pointer for our list. */
3846 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3847 /* This happens to make the list in reverse order,
3848 which we reverse later. */
3849 PREV_SEC (isec) = *list;
3850 *list = isec;
3851 }
3852 }
3853 }
3854
3855 /* See whether we can group stub sections together. Grouping stub
3856 sections may result in fewer stubs. More importantly, we need to
3857 put all .init* and .fini* stubs at the end of the .init or
3858 .fini output sections respectively, because glibc splits the
3859 _init and _fini functions into multiple parts. Putting a stub in
3860 the middle of a function is not a good idea. */
3861
3862 static void
3863 group_sections (struct elf32_arm_link_hash_table *htab,
3864 bfd_size_type stub_group_size,
3865 bfd_boolean stubs_always_after_branch)
3866 {
3867 asection **list = htab->input_list;
3868
3869 do
3870 {
3871 asection *tail = *list;
3872 asection *head;
3873
3874 if (tail == bfd_abs_section_ptr)
3875 continue;
3876
3877 /* Reverse the list: we must avoid placing stubs at the
3878 beginning of the section because the beginning of the text
3879 section may be required for an interrupt vector in bare metal
3880 code. */
3881 #define NEXT_SEC PREV_SEC
3882 head = NULL;
3883 while (tail != NULL)
3884 {
3885 /* Pop from tail. */
3886 asection *item = tail;
3887 tail = PREV_SEC (item);
3888
3889 /* Push on head. */
3890 NEXT_SEC (item) = head;
3891 head = item;
3892 }
3893
3894 while (head != NULL)
3895 {
3896 asection *curr;
3897 asection *next;
3898 bfd_vma stub_group_start = head->output_offset;
3899 bfd_vma end_of_next;
3900
3901 curr = head;
3902 while (NEXT_SEC (curr) != NULL)
3903 {
3904 next = NEXT_SEC (curr);
3905 end_of_next = next->output_offset + next->size;
3906 if (end_of_next - stub_group_start >= stub_group_size)
3907 /* End of NEXT is too far from start, so stop. */
3908 break;
3909 /* Add NEXT to the group. */
3910 curr = next;
3911 }
3912
3913 /* OK, the size from the start to the start of CURR is less
3914 than stub_group_size and thus can be handled by one stub
3915 section. (Or the head section is itself larger than
3916 stub_group_size, in which case we may be toast.)
3917 We should really be keeping track of the total size of
3918 stubs added here, as stubs contribute to the final output
3919 section size. */
3920 do
3921 {
3922 next = NEXT_SEC (head);
3923 /* Set up this stub group. */
3924 htab->stub_group[head->id].link_sec = curr;
3925 }
3926 while (head != curr && (head = next) != NULL);
3927
3928 /* But wait, there's more! Input sections up to stub_group_size
3929 bytes after the stub section can be handled by it too. */
3930 if (!stubs_always_after_branch)
3931 {
3932 stub_group_start = curr->output_offset + curr->size;
3933
3934 while (next != NULL)
3935 {
3936 end_of_next = next->output_offset + next->size;
3937 if (end_of_next - stub_group_start >= stub_group_size)
3938 /* End of NEXT is too far from stubs, so stop. */
3939 break;
3940 /* Add NEXT to the stub group. */
3941 head = next;
3942 next = NEXT_SEC (head);
3943 htab->stub_group[head->id].link_sec = curr;
3944 }
3945 }
3946 head = next;
3947 }
3948 }
3949 while (list++ != htab->input_list + htab->top_index);
3950
3951 free (htab->input_list);
3952 #undef PREV_SEC
3953 #undef NEXT_SEC
3954 }
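
/* A worked example of the grouping above (illustrative only): with a
   stub_group_size of 0x1000 and three code sections A (output_offset
   0x0, size 0x800), B (0x800, size 0x600) and C (0xe00, size 0x400)
   feeding one output section, the first pass stops before C because C
   would end 0x1200 bytes from A's start, so A and B form a group with
   link_sec == B.  The forward pass then notices that C ends only 0x400
   bytes past the end of B, so C shares the same stubs and also gets
   link_sec == B.  */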
3955
3956 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3957 erratum fix. */
3958
3959 static int
3960 a8_reloc_compare (const void *a, const void *b)
3961 {
3962 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3963 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3964
3965 if (ra->from < rb->from)
3966 return -1;
3967 else if (ra->from > rb->from)
3968 return 1;
3969 else
3970 return 0;
3971 }
3972
3973 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3974 const char *, char **);
3975
3976 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3977 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3978 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3979 otherwise. */
3980
3981 static bfd_boolean
3982 cortex_a8_erratum_scan (bfd *input_bfd,
3983 struct bfd_link_info *info,
3984 struct a8_erratum_fix **a8_fixes_p,
3985 unsigned int *num_a8_fixes_p,
3986 unsigned int *a8_fix_table_size_p,
3987 struct a8_erratum_reloc *a8_relocs,
3988 unsigned int num_a8_relocs,
3989 unsigned prev_num_a8_fixes,
3990 bfd_boolean *stub_changed_p)
3991 {
3992 asection *section;
3993 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3994 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3995 unsigned int num_a8_fixes = *num_a8_fixes_p;
3996 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3997
3998 if (htab == NULL)
3999 return FALSE;
4000
4001 for (section = input_bfd->sections;
4002 section != NULL;
4003 section = section->next)
4004 {
4005 bfd_byte *contents = NULL;
4006 struct _arm_elf_section_data *sec_data;
4007 unsigned int span;
4008 bfd_vma base_vma;
4009
4010 if (elf_section_type (section) != SHT_PROGBITS
4011 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4012 || (section->flags & SEC_EXCLUDE) != 0
4013 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
4014 || (section->output_section == bfd_abs_section_ptr))
4015 continue;
4016
4017 base_vma = section->output_section->vma + section->output_offset;
4018
4019 if (elf_section_data (section)->this_hdr.contents != NULL)
4020 contents = elf_section_data (section)->this_hdr.contents;
4021 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4022 return TRUE;
4023
4024 sec_data = elf32_arm_section_data (section);
4025
4026 for (span = 0; span < sec_data->mapcount; span++)
4027 {
4028 unsigned int span_start = sec_data->map[span].vma;
4029 unsigned int span_end = (span == sec_data->mapcount - 1)
4030 ? section->size : sec_data->map[span + 1].vma;
4031 unsigned int i;
4032 char span_type = sec_data->map[span].type;
4033 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4034
4035 if (span_type != 't')
4036 continue;
4037
4038 /* Span is entirely within a single 4KB region: skip scanning. */
4039 if (((base_vma + span_start) & ~0xfff)
4040 == ((base_vma + span_end) & ~0xfff))
4041 continue;
4042
4043 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4044
4045 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4046 * The branch target is in the same 4KB region as the
4047 first half of the branch.
4048 * The instruction before the branch is a 32-bit
4049 length non-branch instruction. */
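/* (The "straddles two 4K regions" condition is detected below by
   checking that the branch's first halfword lies at an address whose
   low 12 bits are 0xffe.)  */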
4050 for (i = span_start; i < span_end;)
4051 {
4052 unsigned int insn = bfd_getl16 (&contents[i]);
4053 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4054 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4055
4056 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4057 insn_32bit = TRUE;
4058
4059 if (insn_32bit)
4060 {
4061 /* Load the rest of the insn (in manual-friendly order). */
4062 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4063
4064 /* Encoding T4: B<c>.W. */
4065 is_b = (insn & 0xf800d000) == 0xf0009000;
4066 /* Encoding T1: BL<c>.W. */
4067 is_bl = (insn & 0xf800d000) == 0xf000d000;
4068 /* Encoding T2: BLX<c>.W. */
4069 is_blx = (insn & 0xf800d000) == 0xf000c000;
4070 /* Encoding T3: B<c>.W (not permitted in IT block). */
4071 is_bcc = (insn & 0xf800d000) == 0xf0008000
4072 && (insn & 0x07f00000) != 0x03800000;
4073 }
4074
4075 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4076
4077 if (((base_vma + i) & 0xfff) == 0xffe
4078 && insn_32bit
4079 && is_32bit_branch
4080 && last_was_32bit
4081 && ! last_was_branch)
4082 {
4083 bfd_signed_vma offset;
4084 bfd_boolean force_target_arm = FALSE;
4085 bfd_boolean force_target_thumb = FALSE;
4086 bfd_vma target;
4087 enum elf32_arm_stub_type stub_type = arm_stub_none;
4088 struct a8_erratum_reloc key, *found;
4089
4090 key.from = base_vma + i;
4091 found = (struct a8_erratum_reloc *)
4092 bsearch (&key, a8_relocs, num_a8_relocs,
4093 sizeof (struct a8_erratum_reloc),
4094 &a8_reloc_compare);
4095
4096 if (found)
4097 {
4098 char *error_message = NULL;
4099 struct elf_link_hash_entry *entry;
4100
4101 /* We don't care about the error returned from this
4102 function, only whether there is glue or not. */
4103 entry = find_thumb_glue (info, found->sym_name,
4104 &error_message);
4105
4106 if (entry)
4107 found->non_a8_stub = TRUE;
4108
4109 if (found->r_type == R_ARM_THM_CALL
4110 && found->st_type != STT_ARM_TFUNC)
4111 force_target_arm = TRUE;
4112 else if (found->r_type == R_ARM_THM_CALL
4113 && found->st_type == STT_ARM_TFUNC)
4114 force_target_thumb = TRUE;
4115 }
4116
4117 /* Check if we have an offending branch instruction. */
4118
4119 if (found && found->non_a8_stub)
4120 /* We've already made a stub for this instruction, e.g.
4121 it's a long branch or a Thumb->ARM stub. Assume that
4122 stub will suffice to work around the A8 erratum (see
4123 setting of always_after_branch above). */
4124 ;
4125 else if (is_bcc)
4126 {
4127 offset = (insn & 0x7ff) << 1;
4128 offset |= (insn & 0x3f0000) >> 4;
4129 offset |= (insn & 0x2000) ? 0x40000 : 0;
4130 offset |= (insn & 0x800) ? 0x80000 : 0;
4131 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4132 if (offset & 0x100000)
4133 offset |= ~ ((bfd_signed_vma) 0xfffff);
4134 stub_type = arm_stub_a8_veneer_b_cond;
4135 }
4136 else if (is_b || is_bl || is_blx)
4137 {
4138 int s = (insn & 0x4000000) != 0;
4139 int j1 = (insn & 0x2000) != 0;
4140 int j2 = (insn & 0x800) != 0;
4141 int i1 = !(j1 ^ s);
4142 int i2 = !(j2 ^ s);
4143
4144 offset = (insn & 0x7ff) << 1;
4145 offset |= (insn & 0x3ff0000) >> 4;
4146 offset |= i2 << 22;
4147 offset |= i1 << 23;
4148 offset |= s << 24;
4149 if (offset & 0x1000000)
4150 offset |= ~ ((bfd_signed_vma) 0xffffff);
4151
4152 if (is_blx)
4153 offset &= ~ ((bfd_signed_vma) 3);
4154
4155 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4156 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4157 }
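
/* The field extraction above follows the ARM ARM Thumb-2 branch
   encodings: for B.W/BL/BLX (encodings T4/T1/T2) the offset is
   SignExtend(S:I1:I2:imm10:imm11:'0') with I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S); for the conditional B<c>.W (encoding T3),
   handled just above, it is SignExtend(S:J2:J1:imm6:imm11:'0').  */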
4158
4159 if (stub_type != arm_stub_none)
4160 {
4161 bfd_vma pc_for_insn = base_vma + i + 4;
4162
4163 /* The original instruction is a BL, but the target is
4164 an ARM instruction. If we were not making a stub,
4165 the BL would have been converted to a BLX. Use the
4166 BLX stub instead in that case. */
4167 if (htab->use_blx && force_target_arm
4168 && stub_type == arm_stub_a8_veneer_bl)
4169 {
4170 stub_type = arm_stub_a8_veneer_blx;
4171 is_blx = TRUE;
4172 is_bl = FALSE;
4173 }
4174 /* Conversely, if the original instruction was
4175 BLX but the target is Thumb mode, use the BL
4176 stub. */
4177 else if (force_target_thumb
4178 && stub_type == arm_stub_a8_veneer_blx)
4179 {
4180 stub_type = arm_stub_a8_veneer_bl;
4181 is_blx = FALSE;
4182 is_bl = TRUE;
4183 }
4184
4185 if (is_blx)
4186 pc_for_insn &= ~ ((bfd_vma) 3);
4187
4188 /* If we found a relocation, use the proper destination,
4189 not the offset in the (unrelocated) instruction.
4190 Note this is always done if we switched the stub type
4191 above. */
4192 if (found)
4193 offset =
4194 (bfd_signed_vma) (found->destination - pc_for_insn);
4195
4196 target = pc_for_insn + offset;
4197
4198 /* The BLX stub is ARM-mode code. Adjust the offset to
4199 take the different PC value (+8 instead of +4) into
4200 account. */
4201 if (stub_type == arm_stub_a8_veneer_blx)
4202 offset += 4;
4203
4204 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4205 {
4206 char *stub_name = NULL;
4207
4208 if (num_a8_fixes == a8_fix_table_size)
4209 {
4210 a8_fix_table_size *= 2;
4211 a8_fixes = (struct a8_erratum_fix *)
4212 bfd_realloc (a8_fixes,
4213 sizeof (struct a8_erratum_fix)
4214 * a8_fix_table_size);
4215 }
4216
4217 if (num_a8_fixes < prev_num_a8_fixes)
4218 {
4219 /* If we're doing a subsequent scan,
4220 check if we've found the same fix as
4221 before, and try and reuse the stub
4222 name. */
4223 stub_name = a8_fixes[num_a8_fixes].stub_name;
4224 if ((a8_fixes[num_a8_fixes].section != section)
4225 || (a8_fixes[num_a8_fixes].offset != i))
4226 {
4227 free (stub_name);
4228 stub_name = NULL;
4229 *stub_changed_p = TRUE;
4230 }
4231 }
4232
4233 if (!stub_name)
4234 {
4235 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4236 if (stub_name != NULL)
4237 sprintf (stub_name, "%x:%x", section->id, i);
4238 }
4239
4240 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4241 a8_fixes[num_a8_fixes].section = section;
4242 a8_fixes[num_a8_fixes].offset = i;
4243 a8_fixes[num_a8_fixes].addend = offset;
4244 a8_fixes[num_a8_fixes].orig_insn = insn;
4245 a8_fixes[num_a8_fixes].stub_name = stub_name;
4246 a8_fixes[num_a8_fixes].stub_type = stub_type;
4247 a8_fixes[num_a8_fixes].st_type =
4248 is_blx ? STT_FUNC : STT_ARM_TFUNC;
4249
4250 num_a8_fixes++;
4251 }
4252 }
4253 }
4254
4255 i += insn_32bit ? 4 : 2;
4256 last_was_32bit = insn_32bit;
4257 last_was_branch = is_32bit_branch;
4258 }
4259 }
4260
4261 if (elf_section_data (section)->this_hdr.contents == NULL)
4262 free (contents);
4263 }
4264
4265 *a8_fixes_p = a8_fixes;
4266 *num_a8_fixes_p = num_a8_fixes;
4267 *a8_fix_table_size_p = a8_fix_table_size;
4268
4269 return FALSE;
4270 }
4271
4272 /* Determine and set the size of the stub section for a final link.
4273
4274 The basic idea here is to examine all the relocations looking for
4275 PC-relative calls to a target that is unreachable with a "bl"
4276 instruction. */
4277
4278 bfd_boolean
4279 elf32_arm_size_stubs (bfd *output_bfd,
4280 bfd *stub_bfd,
4281 struct bfd_link_info *info,
4282 bfd_signed_vma group_size,
4283 asection * (*add_stub_section) (const char *, asection *),
4284 void (*layout_sections_again) (void))
4285 {
4286 bfd_size_type stub_group_size;
4287 bfd_boolean stubs_always_after_branch;
4288 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4289 struct a8_erratum_fix *a8_fixes = NULL;
4290 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4291 struct a8_erratum_reloc *a8_relocs = NULL;
4292 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4293
4294 if (htab == NULL)
4295 return FALSE;
4296
4297 if (htab->fix_cortex_a8)
4298 {
4299 a8_fixes = (struct a8_erratum_fix *)
4300 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4301 a8_relocs = (struct a8_erratum_reloc *)
4302 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4303 }
4304
4305 /* Propagate mach to stub bfd, because it may not have been
4306 finalized when we created stub_bfd. */
4307 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4308 bfd_get_mach (output_bfd));
4309
4310 /* Stash our params away. */
4311 htab->stub_bfd = stub_bfd;
4312 htab->add_stub_section = add_stub_section;
4313 htab->layout_sections_again = layout_sections_again;
4314 stubs_always_after_branch = group_size < 0;
4315
4316 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4317 as the first half of a 32-bit branch straddling two 4K pages. This is a
4318 crude way of enforcing that. */
4319 if (htab->fix_cortex_a8)
4320 stubs_always_after_branch = 1;
4321
4322 if (group_size < 0)
4323 stub_group_size = -group_size;
4324 else
4325 stub_group_size = group_size;
4326
4327 if (stub_group_size == 1)
4328 {
4329 /* Default values. */
4330 /* The Thumb branch range of +/-4MB has to be used as the default
4331 maximum group size (a given section can contain both ARM and
4332 Thumb code, so the worst case has to be assumed).
4333
4334 The value below is about 24K less than that, which leaves room
4335 for 2025 12-byte stubs. If we exceed that, then we will fail
4336 to link. The user will have to relink with an explicit group
4337 size option. */
4338 stub_group_size = 4170000;
4339 }
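
/* Sanity check on the figure above: 4MB is 4194304 bytes, and
   4194304 - 4170000 = 24304, i.e. roughly 24K, which divided by 12
   gives the 2025 stubs mentioned in the comment.  */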
4340
4341 group_sections (htab, stub_group_size, stubs_always_after_branch);
4342
4343 /* If we're applying the cortex A8 fix, we need to determine the
4344 program header size now, because we cannot change it later --
4345 that could alter section placements. Notice the A8 erratum fix
4346 ends up requiring the section addresses to remain unchanged
4347 modulo the page size. That's something we cannot represent
4348 inside BFD, and we don't want to force the section alignment to
4349 be the page size. */
4350 if (htab->fix_cortex_a8)
4351 (*htab->layout_sections_again) ();
4352
4353 while (1)
4354 {
4355 bfd *input_bfd;
4356 unsigned int bfd_indx;
4357 asection *stub_sec;
4358 bfd_boolean stub_changed = FALSE;
4359 unsigned prev_num_a8_fixes = num_a8_fixes;
4360
4361 num_a8_fixes = 0;
4362 for (input_bfd = info->input_bfds, bfd_indx = 0;
4363 input_bfd != NULL;
4364 input_bfd = input_bfd->link_next, bfd_indx++)
4365 {
4366 Elf_Internal_Shdr *symtab_hdr;
4367 asection *section;
4368 Elf_Internal_Sym *local_syms = NULL;
4369
4370 num_a8_relocs = 0;
4371
4372 /* We'll need the symbol table in a second. */
4373 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4374 if (symtab_hdr->sh_info == 0)
4375 continue;
4376
4377 /* Walk over each section attached to the input bfd. */
4378 for (section = input_bfd->sections;
4379 section != NULL;
4380 section = section->next)
4381 {
4382 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4383
4384 /* If there aren't any relocs, then there's nothing more
4385 to do. */
4386 if ((section->flags & SEC_RELOC) == 0
4387 || section->reloc_count == 0
4388 || (section->flags & SEC_CODE) == 0)
4389 continue;
4390
4391 /* If this section is a link-once section that will be
4392 discarded, then don't create any stubs. */
4393 if (section->output_section == NULL
4394 || section->output_section->owner != output_bfd)
4395 continue;
4396
4397 /* Get the relocs. */
4398 internal_relocs
4399 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4400 NULL, info->keep_memory);
4401 if (internal_relocs == NULL)
4402 goto error_ret_free_local;
4403
4404 /* Now examine each relocation. */
4405 irela = internal_relocs;
4406 irelaend = irela + section->reloc_count;
4407 for (; irela < irelaend; irela++)
4408 {
4409 unsigned int r_type, r_indx;
4410 enum elf32_arm_stub_type stub_type;
4411 struct elf32_arm_stub_hash_entry *stub_entry;
4412 asection *sym_sec;
4413 bfd_vma sym_value;
4414 bfd_vma destination;
4415 struct elf32_arm_link_hash_entry *hash;
4416 const char *sym_name;
4417 char *stub_name;
4418 const asection *id_sec;
4419 int st_type;
4420 bfd_boolean created_stub = FALSE;
4421
4422 r_type = ELF32_R_TYPE (irela->r_info);
4423 r_indx = ELF32_R_SYM (irela->r_info);
4424
4425 if (r_type >= (unsigned int) R_ARM_max)
4426 {
4427 bfd_set_error (bfd_error_bad_value);
4428 error_ret_free_internal:
4429 if (elf_section_data (section)->relocs == NULL)
4430 free (internal_relocs);
4431 goto error_ret_free_local;
4432 }
4433
4434 /* Only look for stubs on branch instructions. */
4435 if ((r_type != (unsigned int) R_ARM_CALL)
4436 && (r_type != (unsigned int) R_ARM_THM_CALL)
4437 && (r_type != (unsigned int) R_ARM_JUMP24)
4438 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4439 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4440 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4441 && (r_type != (unsigned int) R_ARM_PLT32))
4442 continue;
4443
4444 /* Now determine the call target, its name, value,
4445 section. */
4446 sym_sec = NULL;
4447 sym_value = 0;
4448 destination = 0;
4449 hash = NULL;
4450 sym_name = NULL;
4451 if (r_indx < symtab_hdr->sh_info)
4452 {
4453 /* It's a local symbol. */
4454 Elf_Internal_Sym *sym;
4455 Elf_Internal_Shdr *hdr;
4456
4457 if (local_syms == NULL)
4458 {
4459 local_syms
4460 = (Elf_Internal_Sym *) symtab_hdr->contents;
4461 if (local_syms == NULL)
4462 local_syms
4463 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4464 symtab_hdr->sh_info, 0,
4465 NULL, NULL, NULL);
4466 if (local_syms == NULL)
4467 goto error_ret_free_internal;
4468 }
4469
4470 sym = local_syms + r_indx;
4471 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4472 sym_sec = hdr->bfd_section;
4473 if (!sym_sec)
4474 /* This is an undefined symbol. It can never
4475 be resolved. */
4476 continue;
4477
4478 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4479 sym_value = sym->st_value;
4480 destination = (sym_value + irela->r_addend
4481 + sym_sec->output_offset
4482 + sym_sec->output_section->vma);
4483 st_type = ELF_ST_TYPE (sym->st_info);
4484 sym_name
4485 = bfd_elf_string_from_elf_section (input_bfd,
4486 symtab_hdr->sh_link,
4487 sym->st_name);
4488 }
4489 else
4490 {
4491 /* It's an external symbol. */
4492 int e_indx;
4493
4494 e_indx = r_indx - symtab_hdr->sh_info;
4495 hash = ((struct elf32_arm_link_hash_entry *)
4496 elf_sym_hashes (input_bfd)[e_indx]);
4497
4498 while (hash->root.root.type == bfd_link_hash_indirect
4499 || hash->root.root.type == bfd_link_hash_warning)
4500 hash = ((struct elf32_arm_link_hash_entry *)
4501 hash->root.root.u.i.link);
4502
4503 if (hash->root.root.type == bfd_link_hash_defined
4504 || hash->root.root.type == bfd_link_hash_defweak)
4505 {
4506 sym_sec = hash->root.root.u.def.section;
4507 sym_value = hash->root.root.u.def.value;
4508
4509 struct elf32_arm_link_hash_table *globals =
4510 elf32_arm_hash_table (info);
4511
4512 /* For a destination in a shared library,
4513 use the PLT stub as target address to
4514 decide whether a branch stub is
4515 needed. */
4516 if (globals != NULL
4517 && globals->splt != NULL
4518 && hash != NULL
4519 && hash->root.plt.offset != (bfd_vma) -1)
4520 {
4521 sym_sec = globals->splt;
4522 sym_value = hash->root.plt.offset;
4523 if (sym_sec->output_section != NULL)
4524 destination = (sym_value
4525 + sym_sec->output_offset
4526 + sym_sec->output_section->vma);
4527 }
4528 else if (sym_sec->output_section != NULL)
4529 destination = (sym_value + irela->r_addend
4530 + sym_sec->output_offset
4531 + sym_sec->output_section->vma);
4532 }
4533 else if ((hash->root.root.type == bfd_link_hash_undefined)
4534 || (hash->root.root.type == bfd_link_hash_undefweak))
4535 {
4536 /* For a shared library, use the PLT stub as
4537 target address to decide whether a long
4538 branch stub is needed.
4539 For absolute code, such undefined references cannot be handled. */
4540 struct elf32_arm_link_hash_table *globals =
4541 elf32_arm_hash_table (info);
4542
4543 if (globals != NULL
4544 && globals->splt != NULL
4545 && hash != NULL
4546 && hash->root.plt.offset != (bfd_vma) -1)
4547 {
4548 sym_sec = globals->splt;
4549 sym_value = hash->root.plt.offset;
4550 if (sym_sec->output_section != NULL)
4551 destination = (sym_value
4552 + sym_sec->output_offset
4553 + sym_sec->output_section->vma);
4554 }
4555 else
4556 continue;
4557 }
4558 else
4559 {
4560 bfd_set_error (bfd_error_bad_value);
4561 goto error_ret_free_internal;
4562 }
4563 st_type = ELF_ST_TYPE (hash->root.type);
4564 sym_name = hash->root.root.root.string;
4565 }
4566
4567 do
4568 {
4569 /* Determine what (if any) linker stub is needed. */
4570 stub_type = arm_type_of_stub (info, section, irela,
4571 &st_type, hash,
4572 destination, sym_sec,
4573 input_bfd, sym_name);
4574 if (stub_type == arm_stub_none)
4575 break;
4576
4577 /* Support for grouping stub sections. */
4578 id_sec = htab->stub_group[section->id].link_sec;
4579
4580 /* Get the name of this stub. */
4581 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4582 irela, stub_type);
4583 if (!stub_name)
4584 goto error_ret_free_internal;
4585
4586 /* We've either created a stub for this reloc already,
4587 or we are about to. */
4588 created_stub = TRUE;
4589
4590 stub_entry = arm_stub_hash_lookup
4591 (&htab->stub_hash_table, stub_name,
4592 FALSE, FALSE);
4593 if (stub_entry != NULL)
4594 {
4595 /* The proper stub has already been created. */
4596 free (stub_name);
4597 stub_entry->target_value = sym_value;
4598 break;
4599 }
4600
4601 stub_entry = elf32_arm_add_stub (stub_name, section,
4602 htab);
4603 if (stub_entry == NULL)
4604 {
4605 free (stub_name);
4606 goto error_ret_free_internal;
4607 }
4608
4609 stub_entry->target_value = sym_value;
4610 stub_entry->target_section = sym_sec;
4611 stub_entry->stub_type = stub_type;
4612 stub_entry->h = hash;
4613 stub_entry->st_type = st_type;
4614
4615 if (sym_name == NULL)
4616 sym_name = "unnamed";
4617 stub_entry->output_name = (char *)
4618 bfd_alloc (htab->stub_bfd,
4619 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4620 + strlen (sym_name));
4621 if (stub_entry->output_name == NULL)
4622 {
4623 free (stub_name);
4624 goto error_ret_free_internal;
4625 }
4626
4627 /* For historical reasons, use the existing names for
4628 ARM-to-Thumb and Thumb-to-ARM stubs. */
4629 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4630 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4631 && st_type != STT_ARM_TFUNC)
4632 sprintf (stub_entry->output_name,
4633 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4634 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4635 || (r_type == (unsigned int) R_ARM_JUMP24))
4636 && st_type == STT_ARM_TFUNC)
4637 sprintf (stub_entry->output_name,
4638 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4639 else
4640 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4641 sym_name);
4642
4643 stub_changed = TRUE;
4644 }
4645 while (0);
4646
4647 /* Look for relocations which might trigger Cortex-A8
4648 erratum. */
4649 if (htab->fix_cortex_a8
4650 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4651 || r_type == (unsigned int) R_ARM_THM_JUMP19
4652 || r_type == (unsigned int) R_ARM_THM_CALL
4653 || r_type == (unsigned int) R_ARM_THM_XPC22))
4654 {
4655 bfd_vma from = section->output_section->vma
4656 + section->output_offset
4657 + irela->r_offset;
4658
4659 if ((from & 0xfff) == 0xffe)
4660 {
4661 /* Found a candidate. Note we haven't checked the
4662 destination is within 4K here: if we do so (and
4663 don't create an entry in a8_relocs) we can't tell
4664 that a branch should have been relocated when
4665 scanning later. */
4666 if (num_a8_relocs == a8_reloc_table_size)
4667 {
4668 a8_reloc_table_size *= 2;
4669 a8_relocs = (struct a8_erratum_reloc *)
4670 bfd_realloc (a8_relocs,
4671 sizeof (struct a8_erratum_reloc)
4672 * a8_reloc_table_size);
4673 }
4674
4675 a8_relocs[num_a8_relocs].from = from;
4676 a8_relocs[num_a8_relocs].destination = destination;
4677 a8_relocs[num_a8_relocs].r_type = r_type;
4678 a8_relocs[num_a8_relocs].st_type = st_type;
4679 a8_relocs[num_a8_relocs].sym_name = sym_name;
4680 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4681
4682 num_a8_relocs++;
4683 }
4684 }
4685 }
4686
4687 /* We're done with the internal relocs, free them. */
4688 if (elf_section_data (section)->relocs == NULL)
4689 free (internal_relocs);
4690 }
4691
4692 if (htab->fix_cortex_a8)
4693 {
4694 /* Sort relocs which might apply to Cortex-A8 erratum. */
4695 qsort (a8_relocs, num_a8_relocs,
4696 sizeof (struct a8_erratum_reloc),
4697 &a8_reloc_compare);
4698
4699 /* Scan for branches which might trigger Cortex-A8 erratum. */
4700 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4701 &num_a8_fixes, &a8_fix_table_size,
4702 a8_relocs, num_a8_relocs,
4703 prev_num_a8_fixes, &stub_changed)
4704 != 0)
4705 goto error_ret_free_local;
4706 }
4707 }
4708
4709 if (prev_num_a8_fixes != num_a8_fixes)
4710 stub_changed = TRUE;
4711
4712 if (!stub_changed)
4713 break;
4714
4715 /* OK, we've added some stubs. Find out the new size of the
4716 stub sections. */
4717 for (stub_sec = htab->stub_bfd->sections;
4718 stub_sec != NULL;
4719 stub_sec = stub_sec->next)
4720 {
4721 /* Ignore non-stub sections. */
4722 if (!strstr (stub_sec->name, STUB_SUFFIX))
4723 continue;
4724
4725 stub_sec->size = 0;
4726 }
4727
4728 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4729
4730 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4731 if (htab->fix_cortex_a8)
4732 for (i = 0; i < num_a8_fixes; i++)
4733 {
4734 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4735 a8_fixes[i].section, htab);
4736
4737 if (stub_sec == NULL)
4738 goto error_ret_free_local;
4739
4740 stub_sec->size
4741 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4742 NULL);
4743 }
4744
4745
4746 /* Ask the linker to do its stuff. */
4747 (*htab->layout_sections_again) ();
4748 }
4749
4750 /* Add stubs for Cortex-A8 erratum fixes now. */
4751 if (htab->fix_cortex_a8)
4752 {
4753 for (i = 0; i < num_a8_fixes; i++)
4754 {
4755 struct elf32_arm_stub_hash_entry *stub_entry;
4756 char *stub_name = a8_fixes[i].stub_name;
4757 asection *section = a8_fixes[i].section;
4758 unsigned int section_id = a8_fixes[i].section->id;
4759 asection *link_sec = htab->stub_group[section_id].link_sec;
4760 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4761 const insn_sequence *template_sequence;
4762 int template_size, size = 0;
4763
4764 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4765 TRUE, FALSE);
4766 if (stub_entry == NULL)
4767 {
4768 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4769 section->owner,
4770 stub_name);
4771 return FALSE;
4772 }
4773
4774 stub_entry->stub_sec = stub_sec;
4775 stub_entry->stub_offset = 0;
4776 stub_entry->id_sec = link_sec;
4777 stub_entry->stub_type = a8_fixes[i].stub_type;
4778 stub_entry->target_section = a8_fixes[i].section;
4779 stub_entry->target_value = a8_fixes[i].offset;
4780 stub_entry->target_addend = a8_fixes[i].addend;
4781 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4782 stub_entry->st_type = a8_fixes[i].st_type;
4783
4784 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4785 &template_sequence,
4786 &template_size);
4787
4788 stub_entry->stub_size = size;
4789 stub_entry->stub_template = template_sequence;
4790 stub_entry->stub_template_size = template_size;
4791 }
4792
4793 /* Stash the Cortex-A8 erratum fix array for use later in
4794 elf32_arm_write_section(). */
4795 htab->a8_erratum_fixes = a8_fixes;
4796 htab->num_a8_erratum_fixes = num_a8_fixes;
4797 }
4798 else
4799 {
4800 htab->a8_erratum_fixes = NULL;
4801 htab->num_a8_erratum_fixes = 0;
4802 }
4803 return TRUE;
4804
4805 error_ret_free_local:
4806 return FALSE;
4807 }
4808
4809 /* Build all the stubs associated with the current output file. The
4810 stubs are kept in a hash table attached to the main linker hash
4811 table. We also set up the .plt entries for statically linked PIC
4812 functions here. This function is called via arm_elf_finish in the
4813 linker. */
4814
4815 bfd_boolean
4816 elf32_arm_build_stubs (struct bfd_link_info *info)
4817 {
4818 asection *stub_sec;
4819 struct bfd_hash_table *table;
4820 struct elf32_arm_link_hash_table *htab;
4821
4822 htab = elf32_arm_hash_table (info);
4823 if (htab == NULL)
4824 return FALSE;
4825
4826 for (stub_sec = htab->stub_bfd->sections;
4827 stub_sec != NULL;
4828 stub_sec = stub_sec->next)
4829 {
4830 bfd_size_type size;
4831
4832 /* Ignore non-stub sections. */
4833 if (!strstr (stub_sec->name, STUB_SUFFIX))
4834 continue;
4835
4836 /* Allocate memory to hold the linker stubs. */
4837 size = stub_sec->size;
4838 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4839 if (stub_sec->contents == NULL && size != 0)
4840 return FALSE;
4841 stub_sec->size = 0;
4842 }
4843
4844 /* Build the stubs as directed by the stub hash table. */
4845 table = &htab->stub_hash_table;
4846 bfd_hash_traverse (table, arm_build_one_stub, info);
4847 if (htab->fix_cortex_a8)
4848 {
4849 /* Place the cortex a8 stubs last. */
4850 htab->fix_cortex_a8 = -1;
4851 bfd_hash_traverse (table, arm_build_one_stub, info);
4852 }
4853
4854 return TRUE;
4855 }
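
/* Taken together with elf32_arm_setup_section_lists,
   elf32_arm_next_input_section and elf32_arm_size_stubs above, the
   expected calling sequence from the linker side is, roughly sketched
   (see ld/emultempl/armelf.em for the authoritative code):

     if (elf32_arm_setup_section_lists (output_bfd, info) > 0)
       {
         for each input section ISEC, in link order:
           elf32_arm_next_input_section (info, ISEC);
         elf32_arm_size_stubs (output_bfd, stub_bfd, info, group_size,
                               add_stub_section, layout_sections_again);
       }
     ...
     elf32_arm_build_stubs (info);   (later, via arm_elf_finish)
  */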
4856
4857 /* Locate the Thumb encoded calling stub for NAME. */
4858
4859 static struct elf_link_hash_entry *
4860 find_thumb_glue (struct bfd_link_info *link_info,
4861 const char *name,
4862 char **error_message)
4863 {
4864 char *tmp_name;
4865 struct elf_link_hash_entry *hash;
4866 struct elf32_arm_link_hash_table *hash_table;
4867
4868 /* We need a pointer to the armelf specific hash table. */
4869 hash_table = elf32_arm_hash_table (link_info);
4870 if (hash_table == NULL)
4871 return NULL;
4872
4873 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4874 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4875
4876 BFD_ASSERT (tmp_name);
4877
4878 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4879
4880 hash = elf_link_hash_lookup
4881 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4882
4883 if (hash == NULL
4884 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4885 tmp_name, name) == -1)
4886 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4887
4888 free (tmp_name);
4889
4890 return hash;
4891 }
4892
4893 /* Locate the ARM encoded calling stub for NAME. */
4894
4895 static struct elf_link_hash_entry *
4896 find_arm_glue (struct bfd_link_info *link_info,
4897 const char *name,
4898 char **error_message)
4899 {
4900 char *tmp_name;
4901 struct elf_link_hash_entry *myh;
4902 struct elf32_arm_link_hash_table *hash_table;
4903
4904 /* We need a pointer to the elfarm specific hash table. */
4905 hash_table = elf32_arm_hash_table (link_info);
4906 if (hash_table == NULL)
4907 return NULL;
4908
4909 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4910 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4911
4912 BFD_ASSERT (tmp_name);
4913
4914 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4915
4916 myh = elf_link_hash_lookup
4917 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4918
4919 if (myh == NULL
4920 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4921 tmp_name, name) == -1)
4922 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4923
4924 free (tmp_name);
4925
4926 return myh;
4927 }
4928
4929 /* ARM->Thumb glue (static images):
4930
4931 .arm
4932 __func_from_arm:
4933 ldr r12, __func_addr
4934 bx r12
4935 __func_addr:
4936 .word func @ behave as if you saw an ARM_32 reloc.
4937
4938 (v5t static images)
4939 .arm
4940 __func_from_arm:
4941 ldr pc, __func_addr
4942 __func_addr:
4943 .word func @ behave as if you saw an ARM_32 reloc.
4944
4945 (relocatable images)
4946 .arm
4947 __func_from_arm:
4948 ldr r12, __func_offset
4949 add r12, r12, pc
4950 bx r12
4951 __func_offset:
4952 .word func - . */
4953
4954 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4955 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4956 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4957 static const insn32 a2t3_func_addr_insn = 0x00000001;
4958
4959 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4960 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4961 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4962
4963 #define ARM2THUMB_PIC_GLUE_SIZE 16
4964 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4965 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4966 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4967
4968 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4969
4970 .thumb .thumb
4971 .align 2 .align 2
4972 __func_from_thumb: __func_from_thumb:
4973 bx pc push {r6, lr}
4974 nop ldr r6, __func_addr
4975 .arm mov lr, pc
4976 b func bx r6
4977 .arm
4978 ;; back_to_thumb
4979 ldmia r13!, {r6, lr}
4980 bx lr
4981 __func_addr:
4982 .word func */
4983
4984 #define THUMB2ARM_GLUE_SIZE 8
4985 static const insn16 t2a1_bx_pc_insn = 0x4778;
4986 static const insn16 t2a2_noop_insn = 0x46c0;
4987 static const insn32 t2a3_b_insn = 0xea000000;
4988
4989 #define VFP11_ERRATUM_VENEER_SIZE 8
4990
4991 #define ARM_BX_VENEER_SIZE 12
4992 static const insn32 armbx1_tst_insn = 0xe3100001;
4993 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4994 static const insn32 armbx3_bx_insn = 0xe12fff10;
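
/* Decoded, the BX veneer above expands to roughly the following, with
   the register fields patched to the BX operand register when the
   veneer is written out:

     tst   rN, #1        @ 0xe3100001
     moveq pc, rN        @ 0x01a0f000
     bx    rN            @ 0xe12fff10

   i.e. an ARM-state destination (bit 0 clear) is reached with a plain
   mov to pc, and only a Thumb destination falls through to the BX.  */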
4995
4996 #ifndef ELFARM_NABI_C_INCLUDED
4997 static void
4998 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4999 {
5000 asection * s;
5001 bfd_byte * contents;
5002
5003 if (size == 0)
5004 {
5005 /* Do not include empty glue sections in the output. */
5006 if (abfd != NULL)
5007 {
5008 s = bfd_get_section_by_name (abfd, name);
5009 if (s != NULL)
5010 s->flags |= SEC_EXCLUDE;
5011 }
5012 return;
5013 }
5014
5015 BFD_ASSERT (abfd != NULL);
5016
5017 s = bfd_get_section_by_name (abfd, name);
5018 BFD_ASSERT (s != NULL);
5019
5020 contents = (bfd_byte *) bfd_alloc (abfd, size);
5021
5022 BFD_ASSERT (s->size == size);
5023 s->contents = contents;
5024 }
5025
5026 bfd_boolean
5027 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5028 {
5029 struct elf32_arm_link_hash_table * globals;
5030
5031 globals = elf32_arm_hash_table (info);
5032 BFD_ASSERT (globals != NULL);
5033
5034 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5035 globals->arm_glue_size,
5036 ARM2THUMB_GLUE_SECTION_NAME);
5037
5038 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5039 globals->thumb_glue_size,
5040 THUMB2ARM_GLUE_SECTION_NAME);
5041
5042 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5043 globals->vfp11_erratum_glue_size,
5044 VFP11_ERRATUM_VENEER_SECTION_NAME);
5045
5046 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5047 globals->bx_glue_size,
5048 ARM_BX_GLUE_SECTION_NAME);
5049
5050 return TRUE;
5051 }
5052
5053 /* Allocate space and symbols for calling a Thumb function from ARM mode.
5054 Returns the symbol identifying the stub. */
5055
5056 static struct elf_link_hash_entry *
5057 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5058 struct elf_link_hash_entry * h)
5059 {
5060 const char * name = h->root.root.string;
5061 asection * s;
5062 char * tmp_name;
5063 struct elf_link_hash_entry * myh;
5064 struct bfd_link_hash_entry * bh;
5065 struct elf32_arm_link_hash_table * globals;
5066 bfd_vma val;
5067 bfd_size_type size;
5068
5069 globals = elf32_arm_hash_table (link_info);
5070 BFD_ASSERT (globals != NULL);
5071 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5072
5073 s = bfd_get_section_by_name
5074 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5075
5076 BFD_ASSERT (s != NULL);
5077
5078 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5079 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5080
5081 BFD_ASSERT (tmp_name);
5082
5083 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5084
5085 myh = elf_link_hash_lookup
5086 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5087
5088 if (myh != NULL)
5089 {
5090 /* We've already seen this guy. */
5091 free (tmp_name);
5092 return myh;
5093 }
5094
5095 /* The only trick here is using hash_table->arm_glue_size as the value.
5096 Even though the section isn't allocated yet, this is where we will be
5097 putting it. The +1 on the value marks that the stub has not been
5098 output yet - not that it is a Thumb function. */
5099 bh = NULL;
5100 val = globals->arm_glue_size + 1;
5101 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5102 tmp_name, BSF_GLOBAL, s, val,
5103 NULL, TRUE, FALSE, &bh);
5104
5105 myh = (struct elf_link_hash_entry *) bh;
5106 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5107 myh->forced_local = 1;
5108
5109 free (tmp_name);
5110
5111 if (link_info->shared || globals->root.is_relocatable_executable
5112 || globals->pic_veneer)
5113 size = ARM2THUMB_PIC_GLUE_SIZE;
5114 else if (globals->use_blx)
5115 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5116 else
5117 size = ARM2THUMB_STATIC_GLUE_SIZE;
5118
5119 s->size += size;
5120 globals->arm_glue_size += size;
5121
5122 return myh;
5123 }
5124
5125 /* Allocate space for ARMv4 BX veneers. */
5126
5127 static void
5128 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5129 {
5130 asection * s;
5131 struct elf32_arm_link_hash_table *globals;
5132 char *tmp_name;
5133 struct elf_link_hash_entry *myh;
5134 struct bfd_link_hash_entry *bh;
5135 bfd_vma val;
5136
5137 /* BX PC does not need a veneer. */
5138 if (reg == 15)
5139 return;
5140
5141 globals = elf32_arm_hash_table (link_info);
5142 BFD_ASSERT (globals != NULL);
5143 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5144
5145 /* Check if this veneer has already been allocated. */
5146 if (globals->bx_glue_offset[reg])
5147 return;
5148
5149 s = bfd_get_section_by_name
5150 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5151
5152 BFD_ASSERT (s != NULL);
5153
5154 /* Add symbol for veneer. */
5155 tmp_name = (char *)
5156 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5157
5158 BFD_ASSERT (tmp_name);
5159
5160 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5161
5162 myh = elf_link_hash_lookup
5163 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5164
5165 BFD_ASSERT (myh == NULL);
5166
5167 bh = NULL;
5168 val = globals->bx_glue_size;
5169 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5170 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5171 NULL, TRUE, FALSE, &bh);
5172
5173 myh = (struct elf_link_hash_entry *) bh;
5174 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5175 myh->forced_local = 1;
5176
5177 s->size += ARM_BX_VENEER_SIZE;
5178 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5179 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5180 }
5181
5182
5183 /* Add an entry to the code/data map for section SEC. */
5184
5185 static void
5186 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5187 {
5188 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5189 unsigned int newidx;
5190
5191 if (sec_data->map == NULL)
5192 {
5193 sec_data->map = (elf32_arm_section_map *)
5194 bfd_malloc (sizeof (elf32_arm_section_map));
5195 sec_data->mapcount = 0;
5196 sec_data->mapsize = 1;
5197 }
5198
5199 newidx = sec_data->mapcount++;
5200
5201 if (sec_data->mapcount > sec_data->mapsize)
5202 {
5203 sec_data->mapsize *= 2;
5204 sec_data->map = (elf32_arm_section_map *)
5205 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5206 * sizeof (elf32_arm_section_map));
5207 }
5208
5209 if (sec_data->map)
5210 {
5211 sec_data->map[newidx].vma = vma;
5212 sec_data->map[newidx].type = type;
5213 }
5214 }
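
/* The TYPE character stored in the map follows the ARM ELF
   mapping-symbol convention: 'a' for ARM code, 't' for Thumb code and
   'd' for data, matching the $a/$t/$d symbols that elf32_arm_init_maps
   collects from input objects.  For instance, the veneer glue below
   records its own start with elf32_arm_section_map_add (s, 'a', 0).  */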
5215
5216
5217 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5218 veneers are handled for now. */
5219
5220 static bfd_vma
5221 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5222 elf32_vfp11_erratum_list *branch,
5223 bfd *branch_bfd,
5224 asection *branch_sec,
5225 unsigned int offset)
5226 {
5227 asection *s;
5228 struct elf32_arm_link_hash_table *hash_table;
5229 char *tmp_name;
5230 struct elf_link_hash_entry *myh;
5231 struct bfd_link_hash_entry *bh;
5232 bfd_vma val;
5233 struct _arm_elf_section_data *sec_data;
5234 int errcount;
5235 elf32_vfp11_erratum_list *newerr;
5236
5237 hash_table = elf32_arm_hash_table (link_info);
5238 BFD_ASSERT (hash_table != NULL);
5239 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5240
5241 s = bfd_get_section_by_name
5242 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5243
5244 BFD_ASSERT (s != NULL);
5245
5246 sec_data = elf32_arm_section_data (s);
5247
5248 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5249 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5250
5251 BFD_ASSERT (tmp_name);
5252
5253 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5254 hash_table->num_vfp11_fixes);
5255
5256 myh = elf_link_hash_lookup
5257 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5258
5259 BFD_ASSERT (myh == NULL);
5260
5261 bh = NULL;
5262 val = hash_table->vfp11_erratum_glue_size;
5263 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5264 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5265 NULL, TRUE, FALSE, &bh);
5266
5267 myh = (struct elf_link_hash_entry *) bh;
5268 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5269 myh->forced_local = 1;
5270
5271 /* Link veneer back to calling location. */
5272 errcount = ++(sec_data->erratumcount);
5273 newerr = (elf32_vfp11_erratum_list *)
5274 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5275
5276 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5277 newerr->vma = -1;
5278 newerr->u.v.branch = branch;
5279 newerr->u.v.id = hash_table->num_vfp11_fixes;
5280 branch->u.b.veneer = newerr;
5281
5282 newerr->next = sec_data->erratumlist;
5283 sec_data->erratumlist = newerr;
5284
5285 /* A symbol for the return from the veneer. */
5286 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5287 hash_table->num_vfp11_fixes);
5288
5289 myh = elf_link_hash_lookup
5290 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5291
5292 if (myh != NULL)
5293 abort ();
5294
5295 bh = NULL;
5296 val = offset + 4;
5297 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5298 branch_sec, val, NULL, TRUE, FALSE, &bh);
5299
5300 myh = (struct elf_link_hash_entry *) bh;
5301 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5302 myh->forced_local = 1;
5303
5304 free (tmp_name);
5305
5306 /* Generate a mapping symbol for the veneer section, and explicitly add an
5307 entry for that symbol to the code/data map for the section. */
5308 if (hash_table->vfp11_erratum_glue_size == 0)
5309 {
5310 bh = NULL;
5311 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5312 ever requires this erratum fix. */
5313 _bfd_generic_link_add_one_symbol (link_info,
5314 hash_table->bfd_of_glue_owner, "$a",
5315 BSF_LOCAL, s, 0, NULL,
5316 TRUE, FALSE, &bh);
5317
5318 myh = (struct elf_link_hash_entry *) bh;
5319 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5320 myh->forced_local = 1;
5321
5322 /* The elf32_arm_init_maps function only cares about symbols from input
5323 BFDs. We must make a note of this generated mapping symbol
5324 ourselves so that code byteswapping works properly in
5325 elf32_arm_write_section. */
5326 elf32_arm_section_map_add (s, 'a', 0);
5327 }
5328
5329 s->size += VFP11_ERRATUM_VENEER_SIZE;
5330 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5331 hash_table->num_vfp11_fixes++;
5332
5333 /* The offset of the veneer. */
5334 return val;
5335 }
5336
5337 #define ARM_GLUE_SECTION_FLAGS \
5338 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5339 | SEC_READONLY | SEC_LINKER_CREATED)
5340
5341 /* Create a fake section for use by the ARM backend of the linker. */
5342
5343 static bfd_boolean
5344 arm_make_glue_section (bfd * abfd, const char * name)
5345 {
5346 asection * sec;
5347
5348 sec = bfd_get_section_by_name (abfd, name);
5349 if (sec != NULL)
5350 /* Already made. */
5351 return TRUE;
5352
5353 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5354
5355 if (sec == NULL
5356 || !bfd_set_section_alignment (abfd, sec, 2))
5357 return FALSE;
5358
5359 /* Set the gc mark to prevent the section from being removed by garbage
5360 collection, despite the fact that no relocs refer to this section. */
5361 sec->gc_mark = 1;
5362
5363 return TRUE;
5364 }
5365
5366 /* Add the glue sections to ABFD. This function is called from the
5367 linker scripts in ld/emultempl/{armelf}.em. */
5368
5369 bfd_boolean
5370 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5371 struct bfd_link_info *info)
5372 {
5373 /* If we are only performing a partial
5374 link do not bother adding the glue. */
5375 if (info->relocatable)
5376 return TRUE;
5377
5378 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5379 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5380 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5381 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5382 }
5383
5384 /* Select a BFD to be used to hold the sections used by the glue code.
5385 This function is called from the linker scripts in ld/emultempl/
5386 {armelf/pe}.em. */
5387
5388 bfd_boolean
5389 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5390 {
5391 struct elf32_arm_link_hash_table *globals;
5392
5393 /* If we are only performing a partial link
5394 do not bother getting a bfd to hold the glue. */
5395 if (info->relocatable)
5396 return TRUE;
5397
5398 /* Make sure we don't attach the glue sections to a dynamic object. */
5399 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5400
5401 globals = elf32_arm_hash_table (info);
5402 BFD_ASSERT (globals != NULL);
5403
5404 if (globals->bfd_of_glue_owner != NULL)
5405 return TRUE;
5406
5407 /* Save the bfd for later use. */
5408 globals->bfd_of_glue_owner = abfd;
5409
5410 return TRUE;
5411 }
5412
5413 static void
5414 check_use_blx (struct elf32_arm_link_hash_table *globals)
5415 {
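/* Tag_CPU_arch values 0, 1 and 2 denote pre-v4, v4 and v4T respectively;
   anything greater is at least v5T, the first architecture to provide BLX.  */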
5416 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5417 Tag_CPU_arch) > 2)
5418 globals->use_blx = 1;
5419 }
5420
5421 bfd_boolean
5422 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5423 struct bfd_link_info *link_info)
5424 {
5425 Elf_Internal_Shdr *symtab_hdr;
5426 Elf_Internal_Rela *internal_relocs = NULL;
5427 Elf_Internal_Rela *irel, *irelend;
5428 bfd_byte *contents = NULL;
5429
5430 asection *sec;
5431 struct elf32_arm_link_hash_table *globals;
5432
5433 /* If we are only performing a partial link do not bother
5434 to construct any glue. */
5435 if (link_info->relocatable)
5436 return TRUE;
5437
5438 /* Here we have a bfd that is to be included in the link.  We have a
5439 hook to do reloc rummaging, before section sizes are nailed down. */
5440 globals = elf32_arm_hash_table (link_info);
5441 BFD_ASSERT (globals != NULL);
5442
5443 check_use_blx (globals);
5444
5445 if (globals->byteswap_code && !bfd_big_endian (abfd))
5446 {
5447 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5448 abfd);
5449 return FALSE;
5450 }
5451
5452 /* PR 5398: If we have not decided to include any loadable sections in
5453 the output then we will not have a glue owner bfd. This is OK, it
5454 just means that there is nothing else for us to do here. */
5455 if (globals->bfd_of_glue_owner == NULL)
5456 return TRUE;
5457
5458 /* Rummage around all the relocs and map the glue vectors. */
5459 sec = abfd->sections;
5460
5461 if (sec == NULL)
5462 return TRUE;
5463
5464 for (; sec != NULL; sec = sec->next)
5465 {
5466 if (sec->reloc_count == 0)
5467 continue;
5468
5469 if ((sec->flags & SEC_EXCLUDE) != 0)
5470 continue;
5471
5472 symtab_hdr = & elf_symtab_hdr (abfd);
5473
5474 /* Load the relocs. */
5475 internal_relocs
5476 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5477
5478 if (internal_relocs == NULL)
5479 goto error_return;
5480
5481 irelend = internal_relocs + sec->reloc_count;
5482 for (irel = internal_relocs; irel < irelend; irel++)
5483 {
5484 long r_type;
5485 unsigned long r_index;
5486
5487 struct elf_link_hash_entry *h;
5488
5489 r_type = ELF32_R_TYPE (irel->r_info);
5490 r_index = ELF32_R_SYM (irel->r_info);
5491
5492 /* These are the only relocation types we care about. */
5493 if ( r_type != R_ARM_PC24
5494 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5495 continue;
5496
5497 /* Get the section contents if we haven't done so already. */
5498 if (contents == NULL)
5499 {
5500 /* Get cached copy if it exists. */
5501 if (elf_section_data (sec)->this_hdr.contents != NULL)
5502 contents = elf_section_data (sec)->this_hdr.contents;
5503 else
5504 {
5505 /* Go get them off disk. */
5506 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5507 goto error_return;
5508 }
5509 }
5510
5511 if (r_type == R_ARM_V4BX)
5512 {
5513 int reg;
5514
5515 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5516 record_arm_bx_glue (link_info, reg);
5517 continue;
5518 }
5519
5520 /* If the relocation is not against a symbol it cannot concern us. */
5521 h = NULL;
5522
5523 /* We don't care about local symbols. */
5524 if (r_index < symtab_hdr->sh_info)
5525 continue;
5526
5527 /* This is an external symbol. */
5528 r_index -= symtab_hdr->sh_info;
5529 h = (struct elf_link_hash_entry *)
5530 elf_sym_hashes (abfd)[r_index];
5531
5532 /* If the relocation is against a static symbol it must be within
5533 the current section and so cannot be a cross ARM/Thumb relocation. */
5534 if (h == NULL)
5535 continue;
5536
5537 /* If the call will go through a PLT entry then we do not need
5538 glue. */
5539 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5540 continue;
5541
5542 switch (r_type)
5543 {
5544 case R_ARM_PC24:
5545 /* This one is a call from arm code. We need to look up
5546 the target of the call. If it is a thumb target, we
5547 insert glue. */
5548 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5549 record_arm_to_thumb_glue (link_info, h);
5550 break;
5551
5552 default:
5553 abort ();
5554 }
5555 }
5556
5557 if (contents != NULL
5558 && elf_section_data (sec)->this_hdr.contents != contents)
5559 free (contents);
5560 contents = NULL;
5561
5562 if (internal_relocs != NULL
5563 && elf_section_data (sec)->relocs != internal_relocs)
5564 free (internal_relocs);
5565 internal_relocs = NULL;
5566 }
5567
5568 return TRUE;
5569
5570 error_return:
5571 if (contents != NULL
5572 && elf_section_data (sec)->this_hdr.contents != contents)
5573 free (contents);
5574 if (internal_relocs != NULL
5575 && elf_section_data (sec)->relocs != internal_relocs)
5576 free (internal_relocs);
5577
5578 return FALSE;
5579 }
5580 #endif
5581
5582
5583 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5584
5585 void
5586 bfd_elf32_arm_init_maps (bfd *abfd)
5587 {
5588 Elf_Internal_Sym *isymbuf;
5589 Elf_Internal_Shdr *hdr;
5590 unsigned int i, localsyms;
5591
5592 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5593 if (! is_arm_elf (abfd))
5594 return;
5595
5596 if ((abfd->flags & DYNAMIC) != 0)
5597 return;
5598
5599 hdr = & elf_symtab_hdr (abfd);
5600 localsyms = hdr->sh_info;
5601
5602 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5603 should contain the number of local symbols, which should come before any
5604 global symbols. Mapping symbols are always local. */
5605 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5606 NULL);
5607
5608 /* No internal symbols read? Skip this BFD. */
5609 if (isymbuf == NULL)
5610 return;
5611
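/* The mapping symbols defined by the ARM ELF ABI are named $a, $t and $d,
   marking the start of ARM code, Thumb code and data respectively, so the
   character name[1] ('a', 't' or 'd') is what gets recorded in the map.  */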
5612 for (i = 0; i < localsyms; i++)
5613 {
5614 Elf_Internal_Sym *isym = &isymbuf[i];
5615 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5616 const char *name;
5617
5618 if (sec != NULL
5619 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5620 {
5621 name = bfd_elf_string_from_elf_section (abfd,
5622 hdr->sh_link, isym->st_name);
5623
5624 if (bfd_is_arm_special_symbol_name (name,
5625 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5626 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5627 }
5628 }
5629 }
5630
5631
5632 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5633 say what they wanted. */
5634
5635 void
5636 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5637 {
5638 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5639 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5640
5641 if (globals == NULL)
5642 return;
5643
5644 if (globals->fix_cortex_a8 == -1)
5645 {
5646 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5647 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5648 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5649 || out_attr[Tag_CPU_arch_profile].i == 0))
5650 globals->fix_cortex_a8 = 1;
5651 else
5652 globals->fix_cortex_a8 = 0;
5653 }
5654 }
5655
5656
5657 void
5658 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5659 {
5660 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5661 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5662
5663 if (globals == NULL)
5664 return;
5665 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5666 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5667 {
5668 switch (globals->vfp11_fix)
5669 {
5670 case BFD_ARM_VFP11_FIX_DEFAULT:
5671 case BFD_ARM_VFP11_FIX_NONE:
5672 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5673 break;
5674
5675 default:
5676 /* Give a warning, but do as the user requests anyway. */
5677 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5678 "workaround is not necessary for target architecture"), obfd);
5679 }
5680 }
5681 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5682 /* For earlier architectures, we might need the workaround, but do not
5683 enable it by default.  If the user is running with broken hardware, they
5684 must enable the erratum fix explicitly.  */
5685 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5686 }
5687
5688
5689 enum bfd_arm_vfp11_pipe
5690 {
5691 VFP11_FMAC,
5692 VFP11_LS,
5693 VFP11_DS,
5694 VFP11_BAD
5695 };
5696
5697 /* Return a VFP register number. This is encoded as RX:X for single-precision
5698 registers, or X:RX for double-precision registers, where RX is the group of
5699 four bits in the instruction encoding and X is the single extension bit.
5700 RX and X fields are specified using their lowest (starting) bit. The return
5701 value is:
5702
5703 0...31: single-precision registers s0...s31
5704 32...63: double-precision registers d0...d31.
5705
5706 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5707 encounter VFP3 instructions, so we allow the full range for DP registers. */
5708
5709 static unsigned int
5710 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5711 unsigned int x)
5712 {
5713 if (is_double)
5714 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5715 else
5716 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5717 }
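
/* For example, using the Fd field of a data-processing encoding (rx = 12,
   x = 22): RX = 0b0011 with X = 1 decodes to (0b0011 << 1) | 1 = 7, i.e. s7,
   in single precision, and to (0b0011 | (1 << 4)) + 32 = 51, i.e. d19, in
   double precision (d16-d31 are only reachable with VFP3 encodings).  */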
5718
5719 /* Set bits in *WMASK according to a register number REG as encoded by
5720 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5721
5722 static void
5723 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5724 {
5725 if (reg < 32)
5726 *wmask |= 1 << reg;
5727 else if (reg < 48)
5728 *wmask |= 3 << ((reg - 32) * 2);
5729 }
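
/* For example, s3 (REG = 3) sets bit 3 of *WMASK, while d2 (REG = 34) sets
   bits 4 and 5, i.e. the bits for its two SP halves s4 and s5.  */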
5730
5731 /* Return TRUE if WMASK overwrites anything in REGS. */
5732
5733 static bfd_boolean
5734 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5735 {
5736 int i;
5737
5738 for (i = 0; i < numregs; i++)
5739 {
5740 unsigned int reg = regs[i];
5741
5742 if (reg < 32 && (wmask & (1 << reg)) != 0)
5743 return TRUE;
5744
5745 reg -= 32;
5746
5747 if (reg >= 16)
5748 continue;
5749
5750 if ((wmask & (3 << (reg * 2))) != 0)
5751 return TRUE;
5752 }
5753
5754 return FALSE;
5755 }
5756
5757 /* In this function, we're interested in two things: finding input registers
5758 for VFP data-processing instructions, and finding the set of registers which
5759 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5760 hold the written set, so FLDM etc. are easy to deal with (we're only
5761 interested in 32 SP registers or 16 DP registers, due to the VFP version
5762 implemented by the chip in question).  DP registers are marked by setting
5763 both SP registers in the write mask.  */
5764
5765 static enum bfd_arm_vfp11_pipe
5766 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5767 int *numregs)
5768 {
5769 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5770 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5771
5772 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5773 {
5774 unsigned int pqrs;
5775 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5776 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5777
5778 pqrs = ((insn & 0x00800000) >> 20)
5779 | ((insn & 0x00300000) >> 19)
5780 | ((insn & 0x00000040) >> 6);
5781
5782 switch (pqrs)
5783 {
5784 case 0: /* fmac[sd]. */
5785 case 1: /* fnmac[sd]. */
5786 case 2: /* fmsc[sd]. */
5787 case 3: /* fnmsc[sd]. */
5788 vpipe = VFP11_FMAC;
5789 bfd_arm_vfp11_write_mask (destmask, fd);
5790 regs[0] = fd;
5791 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5792 regs[2] = fm;
5793 *numregs = 3;
5794 break;
5795
5796 case 4: /* fmul[sd]. */
5797 case 5: /* fnmul[sd]. */
5798 case 6: /* fadd[sd]. */
5799 case 7: /* fsub[sd]. */
5800 vpipe = VFP11_FMAC;
5801 goto vfp_binop;
5802
5803 case 8: /* fdiv[sd]. */
5804 vpipe = VFP11_DS;
5805 vfp_binop:
5806 bfd_arm_vfp11_write_mask (destmask, fd);
5807 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5808 regs[1] = fm;
5809 *numregs = 2;
5810 break;
5811
5812 case 15: /* extended opcode. */
5813 {
5814 unsigned int extn = ((insn >> 15) & 0x1e)
5815 | ((insn >> 7) & 1);
5816
5817 switch (extn)
5818 {
5819 case 0: /* fcpy[sd]. */
5820 case 1: /* fabs[sd]. */
5821 case 2: /* fneg[sd]. */
5822 case 8: /* fcmp[sd]. */
5823 case 9: /* fcmpe[sd]. */
5824 case 10: /* fcmpz[sd]. */
5825 case 11: /* fcmpez[sd]. */
5826 case 16: /* fuito[sd]. */
5827 case 17: /* fsito[sd]. */
5828 case 24: /* ftoui[sd]. */
5829 case 25: /* ftouiz[sd]. */
5830 case 26: /* ftosi[sd]. */
5831 case 27: /* ftosiz[sd]. */
5832 /* These instructions will not bounce due to underflow. */
5833 *numregs = 0;
5834 vpipe = VFP11_FMAC;
5835 break;
5836
5837 case 3: /* fsqrt[sd]. */
5838 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5839 registers to cause the erratum in previous instructions. */
5840 bfd_arm_vfp11_write_mask (destmask, fd);
5841 vpipe = VFP11_DS;
5842 break;
5843
5844 case 15: /* fcvt{ds,sd}. */
5845 {
5846 int rnum = 0;
5847
5848 bfd_arm_vfp11_write_mask (destmask, fd);
5849
5850 /* Only FCVTSD can underflow. */
5851 if ((insn & 0x100) != 0)
5852 regs[rnum++] = fm;
5853
5854 *numregs = rnum;
5855
5856 vpipe = VFP11_FMAC;
5857 }
5858 break;
5859
5860 default:
5861 return VFP11_BAD;
5862 }
5863 }
5864 break;
5865
5866 default:
5867 return VFP11_BAD;
5868 }
5869 }
5870 /* Two-register transfer. */
5871 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5872 {
5873 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5874
5875 if ((insn & 0x100000) == 0)
5876 {
5877 if (is_double)
5878 bfd_arm_vfp11_write_mask (destmask, fm);
5879 else
5880 {
5881 bfd_arm_vfp11_write_mask (destmask, fm);
5882 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5883 }
5884 }
5885
5886 vpipe = VFP11_LS;
5887 }
5888 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5889 {
5890 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5891 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5892
5893 switch (puw)
5894 {
5895 case 0: /* Two-reg transfer. We should catch these above. */
5896 abort ();
5897
5898 case 2: /* fldm[sdx]. */
5899 case 3:
5900 case 5:
5901 {
5902 unsigned int i, offset = insn & 0xff;
5903
5904 if (is_double)
5905 offset >>= 1;
5906
5907 for (i = fd; i < fd + offset; i++)
5908 bfd_arm_vfp11_write_mask (destmask, i);
5909 }
5910 break;
5911
5912 case 4: /* fld[sd]. */
5913 case 6:
5914 bfd_arm_vfp11_write_mask (destmask, fd);
5915 break;
5916
5917 default:
5918 return VFP11_BAD;
5919 }
5920
5921 vpipe = VFP11_LS;
5922 }
5923 /* Single-register transfer. Note L==0. */
5924 else if ((insn & 0x0f100e10) == 0x0e000a10)
5925 {
5926 unsigned int opcode = (insn >> 21) & 7;
5927 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5928
5929 switch (opcode)
5930 {
5931 case 0: /* fmsr/fmdlr. */
5932 case 1: /* fmdhr. */
5933 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5934 destination register. I don't know if this is exactly right,
5935 but it is the conservative choice. */
5936 bfd_arm_vfp11_write_mask (destmask, fn);
5937 break;
5938
5939 case 7: /* fmxr. */
5940 break;
5941 }
5942
5943 vpipe = VFP11_LS;
5944 }
5945
5946 return vpipe;
5947 }
5948
5949
5950 static int elf32_arm_compare_mapping (const void * a, const void * b);
5951
5952
5953 /* Look for potentially-troublesome code sequences which might trigger the
5954 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5955 (available from ARM) for details of the erratum. A short version is
5956 described in ld.texinfo. */
5957
5958 bfd_boolean
5959 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5960 {
5961 asection *sec;
5962 bfd_byte *contents = NULL;
5963 int state = 0;
5964 int regs[3], numregs = 0;
5965 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5966 int use_vector;
5967
5968 if (globals == NULL)
5969 return FALSE;
5970 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5971 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5972 The states transition as follows:
5973
5974 0 -> 1 (vector) or 0 -> 2 (scalar)
5975 A VFP FMAC-pipeline instruction has been seen. Fill
5976 regs[0]..regs[numregs-1] with its input operands. Remember this
5977 instruction in 'first_fmac'.
5978
5979 1 -> 2
5980 Any instruction, except for a VFP instruction which overwrites
5981 regs[*].
5982
5983 1 -> 3 [ -> 0 ] or
5984 2 -> 3 [ -> 0 ]
5985 A VFP instruction has been seen which overwrites any of regs[*].
5986 We must make a veneer! Reset state to 0 before examining next
5987 instruction.
5988
5989 2 -> 0
5990 If we fail to match anything in state 2, reset to state 0 and reset
5991 the instruction pointer to the instruction after 'first_fmac'.
5992
5993 If the VFP11 vector mode is in use, there must be at least two unrelated
5994 instructions between anti-dependent VFP11 instructions to properly avoid
5995 triggering the erratum, hence the use of the extra state 1. */
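
  /* For example, in scalar mode the pair

         fmacs  s0, s1, s2    @ state 0 -> 2; regs = {s0, s1, s2}
         fsubs  s2, s8, s9    @ writes s2, an input above -> state 3

     is flagged and a veneer is generated for the FMACS.  */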
5996
5997 /* If we are only performing a partial link do not bother
5998 to construct any glue. */
5999 if (link_info->relocatable)
6000 return TRUE;
6001
6002 /* Skip if this bfd does not correspond to an ELF image. */
6003 if (! is_arm_elf (abfd))
6004 return TRUE;
6005
6006 /* We should have chosen a fix type by the time we get here. */
6007 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6008
6009 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6010 return TRUE;
6011
6012 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6013 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6014 return TRUE;
6015
6016 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6017 {
6018 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6019 struct _arm_elf_section_data *sec_data;
6020
6021 /* If we don't have executable progbits, we're not interested in this
6022 section. Also skip if section is to be excluded. */
6023 if (elf_section_type (sec) != SHT_PROGBITS
6024 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6025 || (sec->flags & SEC_EXCLUDE) != 0
6026 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
6027 || sec->output_section == bfd_abs_section_ptr
6028 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6029 continue;
6030
6031 sec_data = elf32_arm_section_data (sec);
6032
6033 if (sec_data->mapcount == 0)
6034 continue;
6035
6036 if (elf_section_data (sec)->this_hdr.contents != NULL)
6037 contents = elf_section_data (sec)->this_hdr.contents;
6038 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6039 goto error_return;
6040
6041 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6042 elf32_arm_compare_mapping);
6043
6044 for (span = 0; span < sec_data->mapcount; span++)
6045 {
6046 unsigned int span_start = sec_data->map[span].vma;
6047 unsigned int span_end = (span == sec_data->mapcount - 1)
6048 ? sec->size : sec_data->map[span + 1].vma;
6049 char span_type = sec_data->map[span].type;
6050
6051 /* FIXME: Only ARM mode is supported at present. We may need to
6052 support Thumb-2 mode also at some point. */
6053 if (span_type != 'a')
6054 continue;
6055
6056 for (i = span_start; i < span_end;)
6057 {
6058 unsigned int next_i = i + 4;
6059 unsigned int insn = bfd_big_endian (abfd)
6060 ? (contents[i] << 24)
6061 | (contents[i + 1] << 16)
6062 | (contents[i + 2] << 8)
6063 | contents[i + 3]
6064 : (contents[i + 3] << 24)
6065 | (contents[i + 2] << 16)
6066 | (contents[i + 1] << 8)
6067 | contents[i];
6068 unsigned int writemask = 0;
6069 enum bfd_arm_vfp11_pipe vpipe;
6070
6071 switch (state)
6072 {
6073 case 0:
6074 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6075 &numregs);
6076 /* I'm assuming the VFP11 erratum can trigger with denorm
6077 operands on either the FMAC or the DS pipeline. This might
6078 lead to slightly overenthusiastic veneer insertion. */
6079 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6080 {
6081 state = use_vector ? 1 : 2;
6082 first_fmac = i;
6083 veneer_of_insn = insn;
6084 }
6085 break;
6086
6087 case 1:
6088 {
6089 int other_regs[3], other_numregs;
6090 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6091 other_regs,
6092 &other_numregs);
6093 if (vpipe != VFP11_BAD
6094 && bfd_arm_vfp11_antidependency (writemask, regs,
6095 numregs))
6096 state = 3;
6097 else
6098 state = 2;
6099 }
6100 break;
6101
6102 case 2:
6103 {
6104 int other_regs[3], other_numregs;
6105 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6106 other_regs,
6107 &other_numregs);
6108 if (vpipe != VFP11_BAD
6109 && bfd_arm_vfp11_antidependency (writemask, regs,
6110 numregs))
6111 state = 3;
6112 else
6113 {
6114 state = 0;
6115 next_i = first_fmac + 4;
6116 }
6117 }
6118 break;
6119
6120 case 3:
6121 abort (); /* Should be unreachable. */
6122 }
6123
6124 if (state == 3)
6125 {
6126 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6127 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6128 int errcount;
6129
6130 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6131
6132 newerr->u.b.vfp_insn = veneer_of_insn;
6133
6134 switch (span_type)
6135 {
6136 case 'a':
6137 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6138 break;
6139
6140 default:
6141 abort ();
6142 }
6143
6144 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6145 first_fmac);
6146
6147 newerr->vma = -1;
6148
6149 newerr->next = sec_data->erratumlist;
6150 sec_data->erratumlist = newerr;
6151
6152 state = 0;
6153 }
6154
6155 i = next_i;
6156 }
6157 }
6158
6159 if (contents != NULL
6160 && elf_section_data (sec)->this_hdr.contents != contents)
6161 free (contents);
6162 contents = NULL;
6163 }
6164
6165 return TRUE;
6166
6167 error_return:
6168 if (contents != NULL
6169 && elf_section_data (sec)->this_hdr.contents != contents)
6170 free (contents);
6171
6172 return FALSE;
6173 }
6174
6175 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6176 after sections have been laid out, using specially-named symbols. */
6177
6178 void
6179 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6180 struct bfd_link_info *link_info)
6181 {
6182 asection *sec;
6183 struct elf32_arm_link_hash_table *globals;
6184 char *tmp_name;
6185
6186 if (link_info->relocatable)
6187 return;
6188
6189 /* Skip if this bfd does not correspond to an ELF image. */
6190 if (! is_arm_elf (abfd))
6191 return;
6192
6193 globals = elf32_arm_hash_table (link_info);
6194 if (globals == NULL)
6195 return;
6196
6197 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6198 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6199
6200 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6201 {
6202 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6203 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6204
6205 for (; errnode != NULL; errnode = errnode->next)
6206 {
6207 struct elf_link_hash_entry *myh;
6208 bfd_vma vma;
6209
6210 switch (errnode->type)
6211 {
6212 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6213 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6214 /* Find veneer symbol. */
6215 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6216 errnode->u.b.veneer->u.v.id);
6217
6218 myh = elf_link_hash_lookup
6219 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6220
6221 if (myh == NULL)
6222 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6223 "`%s'"), abfd, tmp_name);
6224
6225 vma = myh->root.u.def.section->output_section->vma
6226 + myh->root.u.def.section->output_offset
6227 + myh->root.u.def.value;
6228
6229 errnode->u.b.veneer->vma = vma;
6230 break;
6231
6232 case VFP11_ERRATUM_ARM_VENEER:
6233 case VFP11_ERRATUM_THUMB_VENEER:
6234 /* Find return location. */
6235 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6236 errnode->u.v.id);
6237
6238 myh = elf_link_hash_lookup
6239 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6240
6241 if (myh == NULL)
6242 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6243 "`%s'"), abfd, tmp_name);
6244
6245 vma = myh->root.u.def.section->output_section->vma
6246 + myh->root.u.def.section->output_offset
6247 + myh->root.u.def.value;
6248
6249 errnode->u.v.branch->vma = vma;
6250 break;
6251
6252 default:
6253 abort ();
6254 }
6255 }
6256 }
6257
6258 free (tmp_name);
6259 }
6260
6261
6262 /* Set target relocation values needed during linking. */
6263
6264 void
6265 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6266 struct bfd_link_info *link_info,
6267 int target1_is_rel,
6268 char * target2_type,
6269 int fix_v4bx,
6270 int use_blx,
6271 bfd_arm_vfp11_fix vfp11_fix,
6272 int no_enum_warn, int no_wchar_warn,
6273 int pic_veneer, int fix_cortex_a8)
6274 {
6275 struct elf32_arm_link_hash_table *globals;
6276
6277 globals = elf32_arm_hash_table (link_info);
6278 if (globals == NULL)
6279 return;
6280
6281 globals->target1_is_rel = target1_is_rel;
6282 if (strcmp (target2_type, "rel") == 0)
6283 globals->target2_reloc = R_ARM_REL32;
6284 else if (strcmp (target2_type, "abs") == 0)
6285 globals->target2_reloc = R_ARM_ABS32;
6286 else if (strcmp (target2_type, "got-rel") == 0)
6287 globals->target2_reloc = R_ARM_GOT_PREL;
6288 else
6289 {
6290 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6291 target2_type);
6292 }
6293 globals->fix_v4bx = fix_v4bx;
6294 globals->use_blx |= use_blx;
6295 globals->vfp11_fix = vfp11_fix;
6296 globals->pic_veneer = pic_veneer;
6297 globals->fix_cortex_a8 = fix_cortex_a8;
6298
6299 BFD_ASSERT (is_arm_elf (output_bfd));
6300 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6301 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6302 }
6303
6304 /* Replace the target offset of a Thumb bl or b.w instruction. */
6305
6306 static void
6307 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6308 {
6309 bfd_vma upper;
6310 bfd_vma lower;
6311 int reloc_sign;
6312
6313 BFD_ASSERT ((offset & 1) == 0);
6314
6315 upper = bfd_get_16 (abfd, insn);
6316 lower = bfd_get_16 (abfd, insn + 2);
6317 reloc_sign = (offset < 0) ? 1 : 0;
6318 upper = (upper & ~(bfd_vma) 0x7ff)
6319 | ((offset >> 12) & 0x3ff)
6320 | (reloc_sign << 10);
6321 lower = (lower & ~(bfd_vma) 0x2fff)
6322 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6323 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6324 | ((offset >> 1) & 0x7ff);
6325 bfd_put_16 (abfd, upper, insn);
6326 bfd_put_16 (abfd, lower, insn + 2);
6327 }
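
/* For example, a forward OFFSET of 0x100 gives S = 0, imm10 = 0, J1 = J2 = 1
   and imm11 = 0x80, so 0x2880 is OR'd into the second halfword.  */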
6328
6329 /* Thumb code calling an ARM function. */
6330
6331 static int
6332 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6333 const char * name,
6334 bfd * input_bfd,
6335 bfd * output_bfd,
6336 asection * input_section,
6337 bfd_byte * hit_data,
6338 asection * sym_sec,
6339 bfd_vma offset,
6340 bfd_signed_vma addend,
6341 bfd_vma val,
6342 char **error_message)
6343 {
6344 asection * s = 0;
6345 bfd_vma my_offset;
6346 long int ret_offset;
6347 struct elf_link_hash_entry * myh;
6348 struct elf32_arm_link_hash_table * globals;
6349
6350 myh = find_thumb_glue (info, name, error_message);
6351 if (myh == NULL)
6352 return FALSE;
6353
6354 globals = elf32_arm_hash_table (info);
6355 BFD_ASSERT (globals != NULL);
6356 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6357
6358 my_offset = myh->root.u.def.value;
6359
6360 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6361 THUMB2ARM_GLUE_SECTION_NAME);
6362
6363 BFD_ASSERT (s != NULL);
6364 BFD_ASSERT (s->contents != NULL);
6365 BFD_ASSERT (s->output_section != NULL);
6366
6367 if ((my_offset & 0x01) == 0x01)
6368 {
6369 if (sym_sec != NULL
6370 && sym_sec->owner != NULL
6371 && !INTERWORK_FLAG (sym_sec->owner))
6372 {
6373 (*_bfd_error_handler)
6374 (_("%B(%s): warning: interworking not enabled.\n"
6375 " first occurrence: %B: thumb call to arm"),
6376 sym_sec->owner, input_bfd, name);
6377
6378 return FALSE;
6379 }
6380
6381 --my_offset;
6382 myh->root.u.def.value = my_offset;
6383
6384 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6385 s->contents + my_offset);
6386
6387 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6388 s->contents + my_offset + 2);
6389
6390 ret_offset =
6391 /* Address of destination of the stub. */
6392 ((bfd_signed_vma) val)
6393 - ((bfd_signed_vma)
6394 /* Offset from the start of the current section
6395 to the start of the stubs. */
6396 (s->output_offset
6397 /* Offset of the start of this stub from the start of the stubs. */
6398 + my_offset
6399 /* Address of the start of the current section. */
6400 + s->output_section->vma)
6401 /* The branch instruction is 4 bytes into the stub. */
6402 + 4
6403 /* ARM branches work from the pc of the instruction + 8. */
6404 + 8);
6405
6406 put_arm_insn (globals, output_bfd,
6407 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6408 s->contents + my_offset + 4);
6409 }
6410
6411 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6412
6413 /* Now go back and fix up the original BL insn to point to here. */
6414 ret_offset =
6415 /* Address of where the stub is located. */
6416 (s->output_section->vma + s->output_offset + my_offset)
6417 /* Address of where the BL is located. */
6418 - (input_section->output_section->vma + input_section->output_offset
6419 + offset)
6420 /* Addend in the relocation. */
6421 - addend
6422 /* Biasing for PC-relative addressing. */
6423 - 8;
6424
6425 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6426
6427 return TRUE;
6428 }
6429
6430 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6431
6432 static struct elf_link_hash_entry *
6433 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6434 const char * name,
6435 bfd * input_bfd,
6436 bfd * output_bfd,
6437 asection * sym_sec,
6438 bfd_vma val,
6439 asection * s,
6440 char ** error_message)
6441 {
6442 bfd_vma my_offset;
6443 long int ret_offset;
6444 struct elf_link_hash_entry * myh;
6445 struct elf32_arm_link_hash_table * globals;
6446
6447 myh = find_arm_glue (info, name, error_message);
6448 if (myh == NULL)
6449 return NULL;
6450
6451 globals = elf32_arm_hash_table (info);
6452 BFD_ASSERT (globals != NULL);
6453 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6454
6455 my_offset = myh->root.u.def.value;
6456
6457 if ((my_offset & 0x01) == 0x01)
6458 {
6459 if (sym_sec != NULL
6460 && sym_sec->owner != NULL
6461 && !INTERWORK_FLAG (sym_sec->owner))
6462 {
6463 (*_bfd_error_handler)
6464 (_("%B(%s): warning: interworking not enabled.\n"
6465 " first occurrence: %B: arm call to thumb"),
6466 sym_sec->owner, input_bfd, name);
6467 }
6468
6469 --my_offset;
6470 myh->root.u.def.value = my_offset;
6471
6472 if (info->shared || globals->root.is_relocatable_executable
6473 || globals->pic_veneer)
6474 {
6475 /* For relocatable objects we can't use absolute addresses,
6476 so construct the address from a relative offset. */
6477 /* TODO: If the offset is small it's probably worth
6478 constructing the address with adds. */
6479 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6480 s->contents + my_offset);
6481 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6482 s->contents + my_offset + 4);
6483 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6484 s->contents + my_offset + 8);
6485 /* Adjust the offset by 4 for the position of the add,
6486 and 8 for the pipeline offset. */
6487 ret_offset = (val - (s->output_offset
6488 + s->output_section->vma
6489 + my_offset + 12))
6490 | 1;
6491 bfd_put_32 (output_bfd, ret_offset,
6492 s->contents + my_offset + 12);
6493 }
6494 else if (globals->use_blx)
6495 {
6496 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6497 s->contents + my_offset);
6498
6499 /* It's a thumb address. Add the low order bit. */
6500 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6501 s->contents + my_offset + 4);
6502 }
6503 else
6504 {
6505 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6506 s->contents + my_offset);
6507
6508 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6509 s->contents + my_offset + 4);
6510
6511 /* It's a thumb address. Add the low order bit. */
6512 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6513 s->contents + my_offset + 8);
6514
6515 my_offset += 12;
6516 }
6517 }
6518
6519 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6520
6521 return myh;
6522 }
6523
6524 /* Arm code calling a Thumb function. */
6525
6526 static int
6527 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6528 const char * name,
6529 bfd * input_bfd,
6530 bfd * output_bfd,
6531 asection * input_section,
6532 bfd_byte * hit_data,
6533 asection * sym_sec,
6534 bfd_vma offset,
6535 bfd_signed_vma addend,
6536 bfd_vma val,
6537 char **error_message)
6538 {
6539 unsigned long int tmp;
6540 bfd_vma my_offset;
6541 asection * s;
6542 long int ret_offset;
6543 struct elf_link_hash_entry * myh;
6544 struct elf32_arm_link_hash_table * globals;
6545
6546 globals = elf32_arm_hash_table (info);
6547 BFD_ASSERT (globals != NULL);
6548 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6549
6550 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6551 ARM2THUMB_GLUE_SECTION_NAME);
6552 BFD_ASSERT (s != NULL);
6553 BFD_ASSERT (s->contents != NULL);
6554 BFD_ASSERT (s->output_section != NULL);
6555
6556 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6557 sym_sec, val, s, error_message);
6558 if (!myh)
6559 return FALSE;
6560
6561 my_offset = myh->root.u.def.value;
6562 tmp = bfd_get_32 (input_bfd, hit_data);
6563 tmp = tmp & 0xFF000000;
6564
6565 /* Somehow these are both 4 too far, so subtract 8. */
6566 ret_offset = (s->output_offset
6567 + my_offset
6568 + s->output_section->vma
6569 - (input_section->output_offset
6570 + input_section->output_section->vma
6571 + offset + addend)
6572 - 8);
6573
6574 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6575
6576 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6577
6578 return TRUE;
6579 }
6580
6581 /* Populate Arm stub for an exported Thumb function. */
6582
6583 static bfd_boolean
6584 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6585 {
6586 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6587 asection * s;
6588 struct elf_link_hash_entry * myh;
6589 struct elf32_arm_link_hash_entry *eh;
6590 struct elf32_arm_link_hash_table * globals;
6591 asection *sec;
6592 bfd_vma val;
6593 char *error_message;
6594
6595 eh = elf32_arm_hash_entry (h);
6596 /* Allocate stubs for exported Thumb functions on v4t. */
6597 if (eh->export_glue == NULL)
6598 return TRUE;
6599
6600 globals = elf32_arm_hash_table (info);
6601 BFD_ASSERT (globals != NULL);
6602 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6603
6604 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6605 ARM2THUMB_GLUE_SECTION_NAME);
6606 BFD_ASSERT (s != NULL);
6607 BFD_ASSERT (s->contents != NULL);
6608 BFD_ASSERT (s->output_section != NULL);
6609
6610 sec = eh->export_glue->root.u.def.section;
6611
6612 BFD_ASSERT (sec->output_section != NULL);
6613
6614 val = eh->export_glue->root.u.def.value + sec->output_offset
6615 + sec->output_section->vma;
6616
6617 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6618 h->root.u.def.section->owner,
6619 globals->obfd, sec, val, s,
6620 &error_message);
6621 BFD_ASSERT (myh);
6622 return TRUE;
6623 }
6624
6625 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6626
6627 static bfd_vma
6628 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6629 {
6630 bfd_byte *p;
6631 bfd_vma glue_addr;
6632 asection *s;
6633 struct elf32_arm_link_hash_table *globals;
6634
6635 globals = elf32_arm_hash_table (info);
6636 BFD_ASSERT (globals != NULL);
6637 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6638
6639 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6640 ARM_BX_GLUE_SECTION_NAME);
6641 BFD_ASSERT (s != NULL);
6642 BFD_ASSERT (s->contents != NULL);
6643 BFD_ASSERT (s->output_section != NULL);
6644
6645 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6646
6647 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6648
6649 if ((globals->bx_glue_offset[reg] & 1) == 0)
6650 {
6651 p = s->contents + glue_addr;
6652 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6653 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6654 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6655 globals->bx_glue_offset[reg] |= 1;
6656 }
6657
6658 return glue_addr + s->output_section->vma + s->output_offset;
6659 }
6660
6661 /* Generate Arm stubs for exported Thumb symbols. */
6662 static void
6663 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6664 struct bfd_link_info *link_info)
6665 {
6666 struct elf32_arm_link_hash_table * globals;
6667
6668 if (link_info == NULL)
6669 /* Ignore this if we are not called by the ELF backend linker. */
6670 return;
6671
6672 globals = elf32_arm_hash_table (link_info);
6673 if (globals == NULL)
6674 return;
6675
6676 /* If blx is available then exported Thumb symbols are OK and there is
6677 nothing to do. */
6678 if (globals->use_blx)
6679 return;
6680
6681 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6682 link_info);
6683 }
6684
6685 /* Some relocations map to different relocations depending on the
6686 target. Return the real relocation. */
6687
6688 static int
6689 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6690 int r_type)
6691 {
6692 switch (r_type)
6693 {
6694 case R_ARM_TARGET1:
6695 if (globals->target1_is_rel)
6696 return R_ARM_REL32;
6697 else
6698 return R_ARM_ABS32;
6699
6700 case R_ARM_TARGET2:
6701 return globals->target2_reloc;
6702
6703 default:
6704 return r_type;
6705 }
6706 }
6707
6708 /* Return the base VMA address which should be subtracted from real addresses
6709 when resolving @dtpoff relocation.
6710 This is PT_TLS segment p_vaddr. */
6711
6712 static bfd_vma
6713 dtpoff_base (struct bfd_link_info *info)
6714 {
6715 /* If tls_sec is NULL, we should have signalled an error already. */
6716 if (elf_hash_table (info)->tls_sec == NULL)
6717 return 0;
6718 return elf_hash_table (info)->tls_sec->vma;
6719 }
6720
6721 /* Return the relocation value for @tpoff relocation
6722 if STT_TLS virtual address is ADDRESS. */
6723
6724 static bfd_vma
6725 tpoff (struct bfd_link_info *info, bfd_vma address)
6726 {
6727 struct elf_link_hash_table *htab = elf_hash_table (info);
6728 bfd_vma base;
6729
6730 /* If tls_sec is NULL, we should have signalled an error already. */
6731 if (htab->tls_sec == NULL)
6732 return 0;
6733 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6734 return address - htab->tls_sec->vma + base;
6735 }
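
/* For example, with the 8-byte TCB used for ARM (TCB_SIZE) and a PT_TLS
   segment aligned to 4 bytes, BASE is 8, so the first byte of the TLS
   segment has a @tpoff of 8.  */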
6736
6737 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6738 VALUE is the relocation value. */
6739
6740 static bfd_reloc_status_type
6741 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6742 {
6743 if (value > 0xfff)
6744 return bfd_reloc_overflow;
6745
6746 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6747 bfd_put_32 (abfd, value, data);
6748 return bfd_reloc_ok;
6749 }
6750
6751 /* For a given value of n, calculate the value of G_n as required to
6752 deal with group relocations. We return it in the form of an
6753 encoded constant-and-rotation, together with the final residual. If n is
6754 specified as less than zero, then final_residual is filled with the
6755 input value and no further action is performed. */
6756
6757 static bfd_vma
6758 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6759 {
6760 int current_n;
6761 bfd_vma g_n;
6762 bfd_vma encoded_g_n = 0;
6763 bfd_vma residual = value; /* Also known as Y_n. */
6764
6765 for (current_n = 0; current_n <= n; current_n++)
6766 {
6767 int shift;
6768
6769 /* Calculate which part of the value to mask. */
6770 if (residual == 0)
6771 shift = 0;
6772 else
6773 {
6774 int msb;
6775
6776 /* Determine the most significant bit in the residual and
6777 align the resulting value to a 2-bit boundary. */
6778 for (msb = 30; msb >= 0; msb -= 2)
6779 if (residual & (3 << msb))
6780 break;
6781
6782 /* The desired shift is now (msb - 6), or zero, whichever
6783 is the greater. */
6784 shift = msb - 6;
6785 if (shift < 0)
6786 shift = 0;
6787 }
6788
6789 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6790 g_n = residual & (0xff << shift);
6791 encoded_g_n = (g_n >> shift)
6792 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6793
6794 /* Calculate the residual for the next time around. */
6795 residual &= ~g_n;
6796 }
6797
6798 *final_residual = residual;
6799
6800 return encoded_g_n;
6801 }
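
/* For example, with VALUE = 0x12345 and N = 0: the most significant set bit
   falls in the two-bit group at bit 16, so the shift is 16 - 6 = 10, giving
   G_0 = 0x12345 & (0xff << 10) = 0x12000, encoded as immediate 0x48 with
   rotation 11 (i.e. 0x48 ror 22), leaving a residual of 0x345 for G_1.  */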
6802
6803 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6804 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6805
6806 static int
6807 identify_add_or_sub (bfd_vma insn)
6808 {
6809 int opcode = insn & 0x1e00000;
6810
6811 if (opcode == 1 << 23) /* ADD */
6812 return 1;
6813
6814 if (opcode == 1 << 22) /* SUB */
6815 return -1;
6816
6817 return 0;
6818 }
6819
6820 /* Perform a relocation as part of a final link. */
6821
6822 static bfd_reloc_status_type
6823 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6824 bfd * input_bfd,
6825 bfd * output_bfd,
6826 asection * input_section,
6827 bfd_byte * contents,
6828 Elf_Internal_Rela * rel,
6829 bfd_vma value,
6830 struct bfd_link_info * info,
6831 asection * sym_sec,
6832 const char * sym_name,
6833 int sym_flags,
6834 struct elf_link_hash_entry * h,
6835 bfd_boolean * unresolved_reloc_p,
6836 char ** error_message)
6837 {
6838 unsigned long r_type = howto->type;
6839 unsigned long r_symndx;
6840 bfd_byte * hit_data = contents + rel->r_offset;
6841 bfd * dynobj = NULL;
6842 Elf_Internal_Shdr * symtab_hdr;
6843 struct elf_link_hash_entry ** sym_hashes;
6844 bfd_vma * local_got_offsets;
6845 asection * sgot = NULL;
6846 asection * splt = NULL;
6847 asection * sreloc = NULL;
6848 bfd_vma addend;
6849 bfd_signed_vma signed_addend;
6850 struct elf32_arm_link_hash_table * globals;
6851
6852 globals = elf32_arm_hash_table (info);
6853 if (globals == NULL)
6854 return bfd_reloc_notsupported;
6855
6856 BFD_ASSERT (is_arm_elf (input_bfd));
6857
6858 /* Some relocation types map to different relocations depending on the
6859 target. We pick the right one here. */
6860 r_type = arm_real_reloc_type (globals, r_type);
6861 if (r_type != howto->type)
6862 howto = elf32_arm_howto_from_type (r_type);
6863
6864 /* If the start address has been set, then set the EF_ARM_HASENTRY
6865 flag. Setting this more than once is redundant, but the cost is
6866 not too high, and it keeps the code simple.
6867
6868 The test is done here, rather than somewhere else, because the
6869 start address is only set just before the final link commences.
6870
6871 Note - if the user deliberately sets a start address of 0, the
6872 flag will not be set. */
6873 if (bfd_get_start_address (output_bfd) != 0)
6874 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6875
6876 dynobj = elf_hash_table (info)->dynobj;
6877 if (dynobj)
6878 {
6879 sgot = bfd_get_section_by_name (dynobj, ".got");
6880 splt = bfd_get_section_by_name (dynobj, ".plt");
6881 }
6882 symtab_hdr = & elf_symtab_hdr (input_bfd);
6883 sym_hashes = elf_sym_hashes (input_bfd);
6884 local_got_offsets = elf_local_got_offsets (input_bfd);
6885 r_symndx = ELF32_R_SYM (rel->r_info);
6886
6887 if (globals->use_rel)
6888 {
6889 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6890
6891 if (addend & ((howto->src_mask + 1) >> 1))
6892 {
6893 signed_addend = -1;
6894 signed_addend &= ~ howto->src_mask;
6895 signed_addend |= addend;
6896 }
6897 else
6898 signed_addend = addend;
6899 }
6900 else
6901 addend = signed_addend = rel->r_addend;
6902
6903 switch (r_type)
6904 {
6905 case R_ARM_NONE:
6906 /* We don't need to find a value for this symbol. It's just a
6907 marker. */
6908 *unresolved_reloc_p = FALSE;
6909 return bfd_reloc_ok;
6910
6911 case R_ARM_ABS12:
6912 if (!globals->vxworks_p)
6913 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6914
6915 case R_ARM_PC24:
6916 case R_ARM_ABS32:
6917 case R_ARM_ABS32_NOI:
6918 case R_ARM_REL32:
6919 case R_ARM_REL32_NOI:
6920 case R_ARM_CALL:
6921 case R_ARM_JUMP24:
6922 case R_ARM_XPC25:
6923 case R_ARM_PREL31:
6924 case R_ARM_PLT32:
6925 /* Handle relocations which should use the PLT entry. ABS32/REL32
6926 will use the symbol's value, which may point to a PLT entry, but we
6927 don't need to handle that here. If we created a PLT entry, all
6928 branches in this object should go to it, except if the PLT is too
6929 far away, in which case a long branch stub should be inserted. */
6930 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6931 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6932 && r_type != R_ARM_CALL
6933 && r_type != R_ARM_JUMP24
6934 && r_type != R_ARM_PLT32)
6935 && h != NULL
6936 && splt != NULL
6937 && h->plt.offset != (bfd_vma) -1)
6938 {
6939 /* If we've created a .plt section, and assigned a PLT entry to
6940 this function, it should not be known to bind locally. If
6941 it were, we would have cleared the PLT entry. */
6942 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6943
6944 value = (splt->output_section->vma
6945 + splt->output_offset
6946 + h->plt.offset);
6947 *unresolved_reloc_p = FALSE;
6948 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6949 contents, rel->r_offset, value,
6950 rel->r_addend);
6951 }
6952
6953 /* When generating a shared object or relocatable executable, these
6954 relocations are copied into the output file to be resolved at
6955 run time. */
6956 if ((info->shared || globals->root.is_relocatable_executable)
6957 && (input_section->flags & SEC_ALLOC)
6958 && !(globals->vxworks_p
6959 && strcmp (input_section->output_section->name,
6960 ".tls_vars") == 0)
6961 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6962 || !SYMBOL_CALLS_LOCAL (info, h))
6963 && (!strstr (input_section->name, STUB_SUFFIX))
6964 && (h == NULL
6965 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6966 || h->root.type != bfd_link_hash_undefweak)
6967 && r_type != R_ARM_PC24
6968 && r_type != R_ARM_CALL
6969 && r_type != R_ARM_JUMP24
6970 && r_type != R_ARM_PREL31
6971 && r_type != R_ARM_PLT32)
6972 {
6973 Elf_Internal_Rela outrel;
6974 bfd_byte *loc;
6975 bfd_boolean skip, relocate;
6976
6977 *unresolved_reloc_p = FALSE;
6978
6979 if (sreloc == NULL)
6980 {
6981 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6982 ! globals->use_rel);
6983
6984 if (sreloc == NULL)
6985 return bfd_reloc_notsupported;
6986 }
6987
6988 skip = FALSE;
6989 relocate = FALSE;
6990
6991 outrel.r_addend = addend;
6992 outrel.r_offset =
6993 _bfd_elf_section_offset (output_bfd, info, input_section,
6994 rel->r_offset);
6995 if (outrel.r_offset == (bfd_vma) -1)
6996 skip = TRUE;
6997 else if (outrel.r_offset == (bfd_vma) -2)
6998 skip = TRUE, relocate = TRUE;
6999 outrel.r_offset += (input_section->output_section->vma
7000 + input_section->output_offset);
7001
7002 if (skip)
7003 memset (&outrel, 0, sizeof outrel);
7004 else if (h != NULL
7005 && h->dynindx != -1
7006 && (!info->shared
7007 || !info->symbolic
7008 || !h->def_regular))
7009 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
7010 else
7011 {
7012 int symbol;
7013
7014 /* This symbol is local, or marked to become local. */
7015 if (sym_flags == STT_ARM_TFUNC)
7016 value |= 1;
7017 if (globals->symbian_p)
7018 {
7019 asection *osec;
7020
7021 /* On Symbian OS, the data segment and text segment
7022 can be relocated independently. Therefore, we
7023 must indicate the segment to which this
7024 relocation is relative. The BPABI allows us to
7025 use any symbol in the right segment; we just use
7026 the section symbol as it is convenient. (We
7027 cannot use the symbol given by "h" directly as it
7028 will not appear in the dynamic symbol table.)
7029
7030 Note that the dynamic linker ignores the section
7031 symbol value, so we don't subtract osec->vma
7032 from the emitted reloc addend. */
7033 if (sym_sec)
7034 osec = sym_sec->output_section;
7035 else
7036 osec = input_section->output_section;
7037 symbol = elf_section_data (osec)->dynindx;
7038 if (symbol == 0)
7039 {
7040 struct elf_link_hash_table *htab = elf_hash_table (info);
7041
7042 if ((osec->flags & SEC_READONLY) == 0
7043 && htab->data_index_section != NULL)
7044 osec = htab->data_index_section;
7045 else
7046 osec = htab->text_index_section;
7047 symbol = elf_section_data (osec)->dynindx;
7048 }
7049 BFD_ASSERT (symbol != 0);
7050 }
7051 else
7052 /* On SVR4-ish systems, the dynamic loader cannot
7053 relocate the text and data segments independently,
7054 so the symbol does not matter. */
7055 symbol = 0;
7056 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7057 if (globals->use_rel)
7058 relocate = TRUE;
7059 else
7060 outrel.r_addend += value;
7061 }
7062
7063 loc = sreloc->contents;
7064 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7065 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7066
7067 /* If this reloc is against an external symbol, we do not want to
7068 fiddle with the addend. Otherwise, we need to include the symbol
7069 value so that it becomes an addend for the dynamic reloc. */
7070 if (! relocate)
7071 return bfd_reloc_ok;
7072
7073 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7074 contents, rel->r_offset, value,
7075 (bfd_vma) 0);
7076 }
7077 else switch (r_type)
7078 {
7079 case R_ARM_ABS12:
7080 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7081
7082 case R_ARM_XPC25: /* Arm BLX instruction. */
7083 case R_ARM_CALL:
7084 case R_ARM_JUMP24:
7085 case R_ARM_PC24: /* Arm B/BL instruction. */
7086 case R_ARM_PLT32:
7087 {
7088 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7089
7090 if (r_type == R_ARM_XPC25)
7091 {
7092 /* Check for Arm calling Arm function. */
7093 /* FIXME: Should we translate the instruction into a BL
7094 instruction instead? */
7095 if (sym_flags != STT_ARM_TFUNC)
7096 (*_bfd_error_handler)
7097 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7098 input_bfd,
7099 h ? h->root.root.string : "(local)");
7100 }
7101 else if (r_type == R_ARM_PC24)
7102 {
7103 /* Check for Arm calling Thumb function. */
7104 if (sym_flags == STT_ARM_TFUNC)
7105 {
7106 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7107 output_bfd, input_section,
7108 hit_data, sym_sec, rel->r_offset,
7109 signed_addend, value,
7110 error_message))
7111 return bfd_reloc_ok;
7112 else
7113 return bfd_reloc_dangerous;
7114 }
7115 }
7116
7117 /* Check if a stub has to be inserted because the
7118 destination is too far or we are changing mode. */
7119 if ( r_type == R_ARM_CALL
7120 || r_type == R_ARM_JUMP24
7121 || r_type == R_ARM_PLT32)
7122 {
7123 enum elf32_arm_stub_type stub_type = arm_stub_none;
7124 struct elf32_arm_link_hash_entry *hash;
7125
7126 hash = (struct elf32_arm_link_hash_entry *) h;
7127 stub_type = arm_type_of_stub (info, input_section, rel,
7128 &sym_flags, hash,
7129 value, sym_sec,
7130 input_bfd, sym_name);
7131
7132 if (stub_type != arm_stub_none)
7133 {
7134 /* The target is out of reach, so redirect the
7135 branch to the local stub for this function. */
7136
7137 stub_entry = elf32_arm_get_stub_entry (input_section,
7138 sym_sec, h,
7139 rel, globals,
7140 stub_type);
7141 if (stub_entry != NULL)
7142 value = (stub_entry->stub_offset
7143 + stub_entry->stub_sec->output_offset
7144 + stub_entry->stub_sec->output_section->vma);
7145 }
7146 else
7147 {
7148 /* If the call goes through a PLT entry, make sure to
7149 check distance to the right destination address. */
7150 if (h != NULL
7151 && splt != NULL
7152 && h->plt.offset != (bfd_vma) -1)
7153 {
7154 value = (splt->output_section->vma
7155 + splt->output_offset
7156 + h->plt.offset);
7157 *unresolved_reloc_p = FALSE;
7158 /* The PLT entry is in ARM mode, regardless of the
7159 target function. */
7160 sym_flags = STT_FUNC;
7161 }
7162 }
7163 }
7164
7165 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7166 where:
7167 S is the address of the symbol in the relocation.
7168 P is address of the instruction being relocated.
7169 A is the addend (extracted from the instruction) in bytes.
7170
7171 S is held in 'value'.
7172 P is the base address of the section containing the
7173 instruction plus the offset of the reloc into that
7174 section, ie:
7175 (input_section->output_section->vma +
7176 input_section->output_offset +
7177 rel->r_offset).
7178 A is the addend, converted into bytes, ie:
7179 (signed_addend * 4)
7180
7181 Note: None of these operations have knowledge of the pipeline
7182 size of the processor, thus it is up to the assembler to
7183 encode this information into the addend. */
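/* For example, for a REL-style BL at offset 0x10 of a section placed at
   0x8000, targeting a symbol at 0x8100 and carrying the usual -8
   pipeline-bias addend (-2 once shifted) left by the assembler: value
   becomes 0x8100 - 0x8010 - 8 = 0xe8, which after the right shift stores
   an imm24 field of 0x3a.  */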
7184 value -= (input_section->output_section->vma
7185 + input_section->output_offset);
7186 value -= rel->r_offset;
7187 if (globals->use_rel)
7188 value += (signed_addend << howto->size);
7189 else
7190 /* RELA addends do not have to be adjusted by howto->size. */
7191 value += signed_addend;
7192
7193 signed_addend = value;
7194 signed_addend >>= howto->rightshift;
7195
7196 /* A branch to an undefined weak symbol is turned into a jump to
7197 the next instruction unless a PLT entry will be created.
7198 Do the same for local undefined symbols.
7199 The jump to the next instruction is optimized as a NOP depending
7200 on the architecture. */
7201 if (h ? (h->root.type == bfd_link_hash_undefweak
7202 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7203 : bfd_is_und_section (sym_sec))
7204 {
7205 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7206
7207 if (arch_has_arm_nop (globals))
7208 value |= 0x0320f000;
7209 else
7210 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7211 }
7212 else
7213 {
7214 /* Perform a signed range check. */
7215 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7216 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7217 return bfd_reloc_overflow;
7218
7219 addend = (value & 2);
7220
7221 value = (signed_addend & howto->dst_mask)
7222 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7223
7224 if (r_type == R_ARM_CALL)
7225 {
7226 /* Set the H bit in the BLX instruction. */
7227 if (sym_flags == STT_ARM_TFUNC)
7228 {
7229 if (addend)
7230 value |= (1 << 24);
7231 else
7232 value &= ~(bfd_vma)(1 << 24);
7233 }
7234
7235 /* Select the correct instruction (BL or BLX). */
7236 /* Only if we are not handling a BL to a stub. In this
7237 case, mode switching is performed by the stub. */
7238 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7239 value |= (1 << 28);
7240 else
7241 {
7242 value &= ~(bfd_vma)(1 << 28);
7243 value |= (1 << 24);
7244 }
7245 }
7246 }
7247 }
7248 break;
7249
7250 case R_ARM_ABS32:
7251 value += addend;
7252 if (sym_flags == STT_ARM_TFUNC)
7253 value |= 1;
7254 break;
7255
7256 case R_ARM_ABS32_NOI:
7257 value += addend;
7258 break;
7259
7260 case R_ARM_REL32:
7261 value += addend;
7262 if (sym_flags == STT_ARM_TFUNC)
7263 value |= 1;
7264 value -= (input_section->output_section->vma
7265 + input_section->output_offset + rel->r_offset);
7266 break;
7267
7268 case R_ARM_REL32_NOI:
7269 value += addend;
7270 value -= (input_section->output_section->vma
7271 + input_section->output_offset + rel->r_offset);
7272 break;
7273
7274 case R_ARM_PREL31:
7275 value -= (input_section->output_section->vma
7276 + input_section->output_offset + rel->r_offset);
7277 value += signed_addend;
7278 if (! h || h->root.type != bfd_link_hash_undefweak)
7279 {
7280 /* Check for overflow. */
7281 if ((value ^ (value >> 1)) & (1 << 30))
7282 return bfd_reloc_overflow;
7283 }
7284 value &= 0x7fffffff;
7285 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7286 if (sym_flags == STT_ARM_TFUNC)
7287 value |= 1;
7288 break;
7289 }
7290
7291 bfd_put_32 (input_bfd, value, hit_data);
7292 return bfd_reloc_ok;
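
/* Sketch of the R_ARM_PREL31 overflow test a few lines above: a 32-bit
   value fits in a signed 31-bit field exactly when bits 30 and 31 agree.
   The helper is illustrative only and assumes VALUE holds a 32-bit
   quantity.  */
static int
fits_signed_31_bits_sketch (unsigned long value)
{
  return ((value ^ (value >> 1)) & (1UL << 30)) == 0;
}
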
7293
7294 case R_ARM_ABS8:
7295 value += addend;
7296
7297 /* There is no way to tell whether the user intended to use a signed or
7298 unsigned addend. When checking for overflow we accept either,
7299 as specified by the AAELF. */
7300 if ((long) value > 0xff || (long) value < -0x80)
7301 return bfd_reloc_overflow;
7302
7303 bfd_put_8 (input_bfd, value, hit_data);
7304 return bfd_reloc_ok;
7305
7306 case R_ARM_ABS16:
7307 value += addend;
7308
7309 /* See comment for R_ARM_ABS8. */
7310 if ((long) value > 0xffff || (long) value < -0x8000)
7311 return bfd_reloc_overflow;
7312
7313 bfd_put_16 (input_bfd, value, hit_data);
7314 return bfd_reloc_ok;
7315
7316 case R_ARM_THM_ABS5:
7317 /* Support ldr and str instructions for the thumb. */
7318 if (globals->use_rel)
7319 {
7320 /* Need to refetch addend. */
7321 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7322 /* ??? Need to determine shift amount from operand size. */
7323 addend >>= howto->rightshift;
7324 }
7325 value += addend;
7326
7327 /* ??? Isn't value unsigned? */
7328 if ((long) value > 0x1f || (long) value < -0x10)
7329 return bfd_reloc_overflow;
7330
7331 /* ??? Value needs to be properly shifted into place first. */
7332 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7333 bfd_put_16 (input_bfd, value, hit_data);
7334 return bfd_reloc_ok;
7335
7336 case R_ARM_THM_ALU_PREL_11_0:
7337 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7338 {
7339 bfd_vma insn;
7340 bfd_signed_vma relocation;
7341
7342 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7343 | bfd_get_16 (input_bfd, hit_data + 2);
7344
7345 if (globals->use_rel)
7346 {
7347 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7348 | ((insn & (1 << 26)) >> 15);
7349 if (insn & 0xf00000)
7350 signed_addend = -signed_addend;
7351 }
7352
7353 relocation = value + signed_addend;
7354 relocation -= (input_section->output_section->vma
7355 + input_section->output_offset
7356 + rel->r_offset);
7357
7358 value = abs (relocation);
7359
7360 if (value >= 0x1000)
7361 return bfd_reloc_overflow;
7362
7363 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7364 | ((value & 0x700) << 4)
7365 | ((value & 0x800) << 15);
7366 if (relocation < 0)
7367 insn |= 0xa00000;
7368
7369 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7370 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7371
7372 return bfd_reloc_ok;
7373 }
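
/* Sketch of the immediate encoding handled above: a 12-bit magnitude is
   scattered into the 32-bit Thumb-2 ADDW/SUBW encoding as imm8
   (bits 0-7), imm3 (bits 12-14) and the 'i' bit (bit 26).  The helper
   name is invented for illustration.  */
static unsigned long
thumb2_scatter_imm12_sketch (unsigned long insn, unsigned long imm12)
{
  insn &= 0xfb0f8f00UL;            /* clear i, imm3, imm8 and the add/sub
                                      selector, matching the mask above */
  insn |= imm12 & 0xff;            /* imm8 */
  insn |= (imm12 & 0x700) << 4;    /* imm3 */
  insn |= (imm12 & 0x800) << 15;   /* i    */
  return insn;
}
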
7374
7375 case R_ARM_THM_PC8:
7376 /* PR 10073: This reloc is not generated by the GNU toolchain,
7377 but it is supported for compatibility with third party libraries
7378 generated by other compilers, specifically the ARM/IAR. */
7379 {
7380 bfd_vma insn;
7381 bfd_signed_vma relocation;
7382
7383 insn = bfd_get_16 (input_bfd, hit_data);
7384
7385 if (globals->use_rel)
7386 addend = (insn & 0x00ff) << 2;
7387
7388 relocation = value + addend;
7389 relocation -= (input_section->output_section->vma
7390 + input_section->output_offset
7391 + rel->r_offset);
7392
7393 value = abs (relocation);
7394
7395 /* We do not check for overflow of this reloc. Although strictly
7396 speaking this is incorrect, it appears to be necessary in order
7397 to work with IAR generated relocs. Since GCC and GAS do not
7398 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7399 a problem for them. */
7400 value &= 0x3fc;
7401
7402 insn = (insn & 0xff00) | (value >> 2);
7403
7404 bfd_put_16 (input_bfd, insn, hit_data);
7405
7406 return bfd_reloc_ok;
7407 }
7408
7409 case R_ARM_THM_PC12:
7410 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7411 {
7412 bfd_vma insn;
7413 bfd_signed_vma relocation;
7414
7415 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7416 | bfd_get_16 (input_bfd, hit_data + 2);
7417
7418 if (globals->use_rel)
7419 {
7420 signed_addend = insn & 0xfff;
7421 if (!(insn & (1 << 23)))
7422 signed_addend = -signed_addend;
7423 }
7424
7425 relocation = value + signed_addend;
7426 relocation -= (input_section->output_section->vma
7427 + input_section->output_offset
7428 + rel->r_offset);
7429
7430 value = abs (relocation);
7431
7432 if (value >= 0x1000)
7433 return bfd_reloc_overflow;
7434
7435 insn = (insn & 0xff7ff000) | value;
7436 if (relocation >= 0)
7437 insn |= (1 << 23);
7438
7439 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7440 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7441
7442 return bfd_reloc_ok;
7443 }
7444
7445 case R_ARM_THM_XPC22:
7446 case R_ARM_THM_CALL:
7447 case R_ARM_THM_JUMP24:
7448 /* Thumb BL (branch long instruction). */
7449 {
7450 bfd_vma relocation;
7451 bfd_vma reloc_sign;
7452 bfd_boolean overflow = FALSE;
7453 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7454 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7455 bfd_signed_vma reloc_signed_max;
7456 bfd_signed_vma reloc_signed_min;
7457 bfd_vma check;
7458 bfd_signed_vma signed_check;
7459 int bitsize;
7460 const int thumb2 = using_thumb2 (globals);
7461
7462 /* A branch to an undefined weak symbol is turned into a jump to
7463 the next instruction unless a PLT entry will be created.
7464 The jump to the next instruction is optimized as a NOP.W for
7465 Thumb-2 enabled architectures. */
7466 if (h && h->root.type == bfd_link_hash_undefweak
7467 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7468 {
7469 if (arch_has_thumb2_nop (globals))
7470 {
7471 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7472 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7473 }
7474 else
7475 {
7476 bfd_put_16 (input_bfd, 0xe000, hit_data);
7477 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7478 }
7479 return bfd_reloc_ok;
7480 }
7481
7482 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7483 with Thumb-1) involving the J1 and J2 bits. */
7484 if (globals->use_rel)
7485 {
7486 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7487 bfd_vma upper = upper_insn & 0x3ff;
7488 bfd_vma lower = lower_insn & 0x7ff;
7489 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7490 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7491 bfd_vma i1 = j1 ^ s ? 0 : 1;
7492 bfd_vma i2 = j2 ^ s ? 0 : 1;
7493
7494 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7495 /* Sign extend. */
7496 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7497
7498 signed_addend = addend;
7499 }
7500
7501 if (r_type == R_ARM_THM_XPC22)
7502 {
7503 /* Check for Thumb to Thumb call. */
7504 /* FIXME: Should we translate the instruction into a BL
7505 instruction instead ? */
7506 if (sym_flags == STT_ARM_TFUNC)
7507 (*_bfd_error_handler)
7508 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7509 input_bfd,
7510 h ? h->root.root.string : "(local)");
7511 }
7512 else
7513 {
7514 /* If it is not a call to Thumb, assume call to Arm.
7515 If it is a call relative to a section name, then it is not a
7516 function call at all, but rather a long jump. Calls through
7517 the PLT do not require stubs. */
7518 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7519 && (h == NULL || splt == NULL
7520 || h->plt.offset == (bfd_vma) -1))
7521 {
7522 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7523 {
7524 /* Convert BL to BLX. */
7525 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7526 }
7527 else if (( r_type != R_ARM_THM_CALL)
7528 && (r_type != R_ARM_THM_JUMP24))
7529 {
7530 if (elf32_thumb_to_arm_stub
7531 (info, sym_name, input_bfd, output_bfd, input_section,
7532 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7533 error_message))
7534 return bfd_reloc_ok;
7535 else
7536 return bfd_reloc_dangerous;
7537 }
7538 }
7539 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7540 && r_type == R_ARM_THM_CALL)
7541 {
7542 /* Make sure this is a BL. */
7543 lower_insn |= 0x1800;
7544 }
7545 }
7546
7547 enum elf32_arm_stub_type stub_type = arm_stub_none;
7548 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7549 {
7550 /* Check if a stub has to be inserted because the destination
7551 is too far. */
7552 struct elf32_arm_stub_hash_entry *stub_entry;
7553 struct elf32_arm_link_hash_entry *hash;
7554
7555 hash = (struct elf32_arm_link_hash_entry *) h;
7556
7557 stub_type = arm_type_of_stub (info, input_section, rel,
7558 &sym_flags, hash, value, sym_sec,
7559 input_bfd, sym_name);
7560
7561 if (stub_type != arm_stub_none)
7562 {
7563 /* The target is out of reach or we are changing modes, so
7564 redirect the branch to the local stub for this
7565 function. */
7566 stub_entry = elf32_arm_get_stub_entry (input_section,
7567 sym_sec, h,
7568 rel, globals,
7569 stub_type);
7570 if (stub_entry != NULL)
7571 value = (stub_entry->stub_offset
7572 + stub_entry->stub_sec->output_offset
7573 + stub_entry->stub_sec->output_section->vma);
7574
7575 /* If this call becomes a call to Arm, force BLX. */
7576 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7577 {
7578 if ((stub_entry
7579 && !arm_stub_is_thumb (stub_entry->stub_type))
7580 || (sym_flags != STT_ARM_TFUNC))
7581 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7582 }
7583 }
7584 }
7585
7586 /* Handle calls via the PLT. */
7587 if (stub_type == arm_stub_none
7588 && h != NULL
7589 && splt != NULL
7590 && h->plt.offset != (bfd_vma) -1)
7591 {
7592 value = (splt->output_section->vma
7593 + splt->output_offset
7594 + h->plt.offset);
7595
7596 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7597 {
7598 /* If the Thumb BLX instruction is available, convert
7599 the BL to a BLX instruction to call the ARM-mode
7600 PLT entry. */
7601 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7602 sym_flags = STT_FUNC;
7603 }
7604 else
7605 {
7606 /* Target the Thumb stub before the ARM PLT entry. */
7607 value -= PLT_THUMB_STUB_SIZE;
7608 sym_flags = STT_ARM_TFUNC;
7609 }
7610 *unresolved_reloc_p = FALSE;
7611 }
7612
7613 relocation = value + signed_addend;
7614
7615 relocation -= (input_section->output_section->vma
7616 + input_section->output_offset
7617 + rel->r_offset);
7618
7619 check = relocation >> howto->rightshift;
7620
7621 /* If this is a signed value, the rightshift just dropped
7622 leading 1 bits (assuming twos complement). */
7623 if ((bfd_signed_vma) relocation >= 0)
7624 signed_check = check;
7625 else
7626 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7627
7628 	  /* Calculate the permissible maximum and minimum values for
7629 this relocation according to whether we're relocating for
7630 Thumb-2 or not. */
7631 bitsize = howto->bitsize;
7632 if (!thumb2)
7633 bitsize -= 2;
7634 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7635 reloc_signed_min = ~reloc_signed_max;
7636
7637 /* Assumes two's complement. */
7638 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7639 overflow = TRUE;
7640
7641 if ((lower_insn & 0x5000) == 0x4000)
7642 /* For a BLX instruction, make sure that the relocation is rounded up
7643 to a word boundary. This follows the semantics of the instruction
7644 which specifies that bit 1 of the target address will come from bit
7645 1 of the base address. */
7646 relocation = (relocation + 2) & ~ 3;
7647
7648 /* Put RELOCATION back into the insn. Assumes two's complement.
7649 We use the Thumb-2 encoding, which is safe even if dealing with
7650 a Thumb-1 instruction by virtue of our overflow check above. */
7651 reloc_sign = (signed_check < 0) ? 1 : 0;
7652 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7653 | ((relocation >> 12) & 0x3ff)
7654 | (reloc_sign << 10);
7655 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7656 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7657 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7658 | ((relocation >> 1) & 0x7ff);
7659
7660 /* Put the relocated value back in the object file: */
7661 bfd_put_16 (input_bfd, upper_insn, hit_data);
7662 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7663
7664 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7665 }
7666 break;
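
/* Standalone sketch of the REL addend extraction used above: recover the
   signed byte offset carried by a Thumb-2 BL/BLX instruction pair from
   the S, J1 and J2 bits, with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).
   The helper and its plain C types are illustrative only.  */
static long
thumb2_bl_offset_sketch (unsigned int upper_insn, unsigned int lower_insn)
{
  unsigned long s     = (upper_insn >> 10) & 1;
  unsigned long imm10 = upper_insn & 0x3ff;
  unsigned long imm11 = lower_insn & 0x7ff;
  unsigned long j1    = (lower_insn >> 13) & 1;
  unsigned long j2    = (lower_insn >> 11) & 1;
  unsigned long i1    = (j1 ^ s) ? 0 : 1;
  unsigned long i2    = (j2 ^ s) ? 0 : 1;
  long offset = (long) ((i1 << 23) | (i2 << 22) | (imm10 << 12) | (imm11 << 1));

  /* S is the sign bit of the 25-bit immediate.  */
  if (s)
    offset -= 1L << 24;
  return offset;
}
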
7667
7668 case R_ARM_THM_JUMP19:
7669 /* Thumb32 conditional branch instruction. */
7670 {
7671 bfd_vma relocation;
7672 bfd_boolean overflow = FALSE;
7673 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7674 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7675 bfd_signed_vma reloc_signed_max = 0xffffe;
7676 bfd_signed_vma reloc_signed_min = -0x100000;
7677 bfd_signed_vma signed_check;
7678
7679 /* Need to refetch the addend, reconstruct the top three bits,
7680 and squish the two 11 bit pieces together. */
7681 if (globals->use_rel)
7682 {
7683 bfd_vma S = (upper_insn & 0x0400) >> 10;
7684 bfd_vma upper = (upper_insn & 0x003f);
7685 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7686 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7687 bfd_vma lower = (lower_insn & 0x07ff);
7688
7689 upper |= J1 << 6;
7690 upper |= J2 << 7;
7691 upper |= (!S) << 8;
7692 upper -= 0x0100; /* Sign extend. */
7693
7694 addend = (upper << 12) | (lower << 1);
7695 signed_addend = addend;
7696 }
7697
7698 /* Handle calls via the PLT. */
7699 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7700 {
7701 value = (splt->output_section->vma
7702 + splt->output_offset
7703 + h->plt.offset);
7704 /* Target the Thumb stub before the ARM PLT entry. */
7705 value -= PLT_THUMB_STUB_SIZE;
7706 *unresolved_reloc_p = FALSE;
7707 }
7708
7709 /* ??? Should handle interworking? GCC might someday try to
7710 use this for tail calls. */
7711
7712 relocation = value + signed_addend;
7713 relocation -= (input_section->output_section->vma
7714 + input_section->output_offset
7715 + rel->r_offset);
7716 signed_check = (bfd_signed_vma) relocation;
7717
7718 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7719 overflow = TRUE;
7720
7721 /* Put RELOCATION back into the insn. */
7722 {
7723 bfd_vma S = (relocation & 0x00100000) >> 20;
7724 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7725 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7726 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7727 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7728
7729 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7730 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7731 }
7732
7733 /* Put the relocated value back in the object file: */
7734 bfd_put_16 (input_bfd, upper_insn, hit_data);
7735 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7736
7737 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7738 }
7739
7740 case R_ARM_THM_JUMP11:
7741 case R_ARM_THM_JUMP8:
7742 case R_ARM_THM_JUMP6:
7743       /* Thumb B (branch) instruction. */
7744 {
7745 bfd_signed_vma relocation;
7746 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7747 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7748 bfd_signed_vma signed_check;
7749
7750 	/* CBZ cannot jump backward. */
7751 if (r_type == R_ARM_THM_JUMP6)
7752 reloc_signed_min = 0;
7753
7754 if (globals->use_rel)
7755 {
7756 /* Need to refetch addend. */
7757 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7758 if (addend & ((howto->src_mask + 1) >> 1))
7759 {
7760 signed_addend = -1;
7761 signed_addend &= ~ howto->src_mask;
7762 signed_addend |= addend;
7763 }
7764 else
7765 signed_addend = addend;
7766 /* The value in the insn has been right shifted. We need to
7767 undo this, so that we can perform the address calculation
7768 in terms of bytes. */
7769 signed_addend <<= howto->rightshift;
7770 }
7771 relocation = value + signed_addend;
7772
7773 relocation -= (input_section->output_section->vma
7774 + input_section->output_offset
7775 + rel->r_offset);
7776
7777 relocation >>= howto->rightshift;
7778 signed_check = relocation;
7779
7780 if (r_type == R_ARM_THM_JUMP6)
7781 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7782 else
7783 relocation &= howto->dst_mask;
7784 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7785
7786 bfd_put_16 (input_bfd, relocation, hit_data);
7787
7788 /* Assumes two's complement. */
7789 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7790 return bfd_reloc_overflow;
7791
7792 return bfd_reloc_ok;
7793 }
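
/* Sketch of the sign-extension idiom used for the REL addend above and
   elsewhere in this file: SRC_MASK selects a contiguous low-order field
   whose top bit is its sign bit.  Illustrative helper; it assumes the
   field is much narrower than a long, as the instruction fields here
   are.  */
static long
sign_extend_field_sketch (unsigned long field, unsigned long src_mask)
{
  if (field & ((src_mask + 1) >> 1))               /* sign bit set?  */
    return (long) field - (long) (src_mask + 1);   /* extend the ones */
  return (long) field;
}
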
7794
7795 case R_ARM_ALU_PCREL7_0:
7796 case R_ARM_ALU_PCREL15_8:
7797 case R_ARM_ALU_PCREL23_15:
7798 {
7799 bfd_vma insn;
7800 bfd_vma relocation;
7801
7802 insn = bfd_get_32 (input_bfd, hit_data);
7803 if (globals->use_rel)
7804 {
7805 /* Extract the addend. */
7806 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7807 signed_addend = addend;
7808 }
7809 relocation = value + signed_addend;
7810
7811 relocation -= (input_section->output_section->vma
7812 + input_section->output_offset
7813 + rel->r_offset);
7814 insn = (insn & ~0xfff)
7815 | ((howto->bitpos << 7) & 0xf00)
7816 | ((relocation >> howto->bitpos) & 0xff);
7817 	bfd_put_32 (input_bfd, insn, hit_data);
7818 }
7819 return bfd_reloc_ok;
7820
7821 case R_ARM_GNU_VTINHERIT:
7822 case R_ARM_GNU_VTENTRY:
7823 return bfd_reloc_ok;
7824
7825 case R_ARM_GOTOFF32:
7826 /* Relocation is relative to the start of the
7827 global offset table. */
7828
7829 BFD_ASSERT (sgot != NULL);
7830 if (sgot == NULL)
7831 return bfd_reloc_notsupported;
7832
7833 /* If we are addressing a Thumb function, we need to adjust the
7834 address by one, so that attempts to call the function pointer will
7835 correctly interpret it as Thumb code. */
7836 if (sym_flags == STT_ARM_TFUNC)
7837 value += 1;
7838
7839 /* Note that sgot->output_offset is not involved in this
7840 calculation. We always want the start of .got. If we
7841 	 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
7842 permitted by the ABI, we might have to change this
7843 calculation. */
7844 value -= sgot->output_section->vma;
7845 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7846 contents, rel->r_offset, value,
7847 rel->r_addend);
7848
7849 case R_ARM_GOTPC:
7850 /* Use global offset table as symbol value. */
7851 BFD_ASSERT (sgot != NULL);
7852
7853 if (sgot == NULL)
7854 return bfd_reloc_notsupported;
7855
7856 *unresolved_reloc_p = FALSE;
7857 value = sgot->output_section->vma;
7858 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7859 contents, rel->r_offset, value,
7860 rel->r_addend);
7861
7862 case R_ARM_GOT32:
7863 case R_ARM_GOT_PREL:
7864 /* Relocation is to the entry for this symbol in the
7865 global offset table. */
7866 if (sgot == NULL)
7867 return bfd_reloc_notsupported;
7868
7869 if (h != NULL)
7870 {
7871 bfd_vma off;
7872 bfd_boolean dyn;
7873
7874 off = h->got.offset;
7875 BFD_ASSERT (off != (bfd_vma) -1);
7876 dyn = globals->root.dynamic_sections_created;
7877
7878 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7879 || (info->shared
7880 && SYMBOL_REFERENCES_LOCAL (info, h))
7881 || (ELF_ST_VISIBILITY (h->other)
7882 && h->root.type == bfd_link_hash_undefweak))
7883 {
7884 /* This is actually a static link, or it is a -Bsymbolic link
7885 and the symbol is defined locally. We must initialize this
7886 entry in the global offset table. Since the offset must
7887 always be a multiple of 4, we use the least significant bit
7888 to record whether we have initialized it already.
7889
7890 When doing a dynamic link, we create a .rel(a).got relocation
7891 entry to initialize the value. This is done in the
7892 finish_dynamic_symbol routine. */
7893 if ((off & 1) != 0)
7894 off &= ~1;
7895 else
7896 {
7897 /* If we are addressing a Thumb function, we need to
7898 adjust the address by one, so that attempts to
7899 call the function pointer will correctly
7900 interpret it as Thumb code. */
7901 if (sym_flags == STT_ARM_TFUNC)
7902 value |= 1;
7903
7904 bfd_put_32 (output_bfd, value, sgot->contents + off);
7905 h->got.offset |= 1;
7906 }
7907 }
7908 else
7909 *unresolved_reloc_p = FALSE;
7910
7911 value = sgot->output_offset + off;
7912 }
7913 else
7914 {
7915 bfd_vma off;
7916
7917 BFD_ASSERT (local_got_offsets != NULL &&
7918 local_got_offsets[r_symndx] != (bfd_vma) -1);
7919
7920 off = local_got_offsets[r_symndx];
7921
7922 /* The offset must always be a multiple of 4. We use the
7923 least significant bit to record whether we have already
7924 generated the necessary reloc. */
7925 if ((off & 1) != 0)
7926 off &= ~1;
7927 else
7928 {
7929 /* If we are addressing a Thumb function, we need to
7930 adjust the address by one, so that attempts to
7931 call the function pointer will correctly
7932 interpret it as Thumb code. */
7933 if (sym_flags == STT_ARM_TFUNC)
7934 value |= 1;
7935
7936 if (globals->use_rel)
7937 bfd_put_32 (output_bfd, value, sgot->contents + off);
7938
7939 if (info->shared)
7940 {
7941 asection * srelgot;
7942 Elf_Internal_Rela outrel;
7943 bfd_byte *loc;
7944
7945 srelgot = (bfd_get_section_by_name
7946 (dynobj, RELOC_SECTION (globals, ".got")));
7947 BFD_ASSERT (srelgot != NULL);
7948
7949 outrel.r_addend = addend + value;
7950 outrel.r_offset = (sgot->output_section->vma
7951 + sgot->output_offset
7952 + off);
7953 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7954 loc = srelgot->contents;
7955 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7956 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7957 }
7958
7959 local_got_offsets[r_symndx] |= 1;
7960 }
7961
7962 value = sgot->output_offset + off;
7963 }
7964 if (r_type != R_ARM_GOT32)
7965 value += sgot->output_section->vma;
7966
7967 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7968 contents, rel->r_offset, value,
7969 rel->r_addend);
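
/* Sketch of the bookkeeping trick used above: GOT entries are 4-byte
   aligned, so bit 0 of a recorded GOT offset is free to mean "this
   entry has already been initialized".  Illustrative helper only.  */
static unsigned long
got_offset_strip_tag_sketch (unsigned long off, int *already_done)
{
  *already_done = (off & 1) != 0;
  return off & ~(unsigned long) 1;
}
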
7970
7971 case R_ARM_TLS_LDO32:
7972 value = value - dtpoff_base (info);
7973
7974 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7975 contents, rel->r_offset, value,
7976 rel->r_addend);
7977
7978 case R_ARM_TLS_LDM32:
7979 {
7980 bfd_vma off;
7981
7982 if (globals->sgot == NULL)
7983 abort ();
7984
7985 off = globals->tls_ldm_got.offset;
7986
7987 if ((off & 1) != 0)
7988 off &= ~1;
7989 else
7990 {
7991 /* If we don't know the module number, create a relocation
7992 for it. */
7993 if (info->shared)
7994 {
7995 Elf_Internal_Rela outrel;
7996 bfd_byte *loc;
7997
7998 if (globals->srelgot == NULL)
7999 abort ();
8000
8001 outrel.r_addend = 0;
8002 outrel.r_offset = (globals->sgot->output_section->vma
8003 + globals->sgot->output_offset + off);
8004 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
8005
8006 if (globals->use_rel)
8007 bfd_put_32 (output_bfd, outrel.r_addend,
8008 globals->sgot->contents + off);
8009
8010 loc = globals->srelgot->contents;
8011 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
8012 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8013 }
8014 else
8015 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
8016
8017 globals->tls_ldm_got.offset |= 1;
8018 }
8019
8020 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8021 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8022
8023 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8024 contents, rel->r_offset, value,
8025 rel->r_addend);
8026 }
8027
8028 case R_ARM_TLS_GD32:
8029 case R_ARM_TLS_IE32:
8030 {
8031 bfd_vma off;
8032 int indx;
8033 char tls_type;
8034
8035 if (globals->sgot == NULL)
8036 abort ();
8037
8038 indx = 0;
8039 if (h != NULL)
8040 {
8041 bfd_boolean dyn;
8042 dyn = globals->root.dynamic_sections_created;
8043 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8044 && (!info->shared
8045 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8046 {
8047 *unresolved_reloc_p = FALSE;
8048 indx = h->dynindx;
8049 }
8050 off = h->got.offset;
8051 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8052 }
8053 else
8054 {
8055 if (local_got_offsets == NULL)
8056 abort ();
8057 off = local_got_offsets[r_symndx];
8058 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8059 }
8060
8061 if (tls_type == GOT_UNKNOWN)
8062 abort ();
8063
8064 if ((off & 1) != 0)
8065 off &= ~1;
8066 else
8067 {
8068 bfd_boolean need_relocs = FALSE;
8069 Elf_Internal_Rela outrel;
8070 bfd_byte *loc = NULL;
8071 int cur_off = off;
8072
8073 /* The GOT entries have not been initialized yet. Do it
8074 now, and emit any relocations. If both an IE GOT and a
8075 GD GOT are necessary, we emit the GD first. */
8076
8077 if ((info->shared || indx != 0)
8078 && (h == NULL
8079 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8080 || h->root.type != bfd_link_hash_undefweak))
8081 {
8082 need_relocs = TRUE;
8083 if (globals->srelgot == NULL)
8084 abort ();
8085 loc = globals->srelgot->contents;
8086 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8087 }
8088
8089 if (tls_type & GOT_TLS_GD)
8090 {
8091 if (need_relocs)
8092 {
8093 outrel.r_addend = 0;
8094 outrel.r_offset = (globals->sgot->output_section->vma
8095 + globals->sgot->output_offset
8096 + cur_off);
8097 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8098
8099 if (globals->use_rel)
8100 bfd_put_32 (output_bfd, outrel.r_addend,
8101 globals->sgot->contents + cur_off);
8102
8103 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8104 globals->srelgot->reloc_count++;
8105 loc += RELOC_SIZE (globals);
8106
8107 if (indx == 0)
8108 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8109 globals->sgot->contents + cur_off + 4);
8110 else
8111 {
8112 outrel.r_addend = 0;
8113 outrel.r_info = ELF32_R_INFO (indx,
8114 R_ARM_TLS_DTPOFF32);
8115 outrel.r_offset += 4;
8116
8117 if (globals->use_rel)
8118 bfd_put_32 (output_bfd, outrel.r_addend,
8119 globals->sgot->contents + cur_off + 4);
8120
8121
8122 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8123 globals->srelgot->reloc_count++;
8124 loc += RELOC_SIZE (globals);
8125 }
8126 }
8127 else
8128 {
8129 /* If we are not emitting relocations for a
8130 general dynamic reference, then we must be in a
8131 static link or an executable link with the
8132 symbol binding locally. Mark it as belonging
8133 to module 1, the executable. */
8134 bfd_put_32 (output_bfd, 1,
8135 globals->sgot->contents + cur_off);
8136 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8137 globals->sgot->contents + cur_off + 4);
8138 }
8139
8140 cur_off += 8;
8141 }
8142
8143 if (tls_type & GOT_TLS_IE)
8144 {
8145 if (need_relocs)
8146 {
8147 if (indx == 0)
8148 outrel.r_addend = value - dtpoff_base (info);
8149 else
8150 outrel.r_addend = 0;
8151 outrel.r_offset = (globals->sgot->output_section->vma
8152 + globals->sgot->output_offset
8153 + cur_off);
8154 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8155
8156 if (globals->use_rel)
8157 bfd_put_32 (output_bfd, outrel.r_addend,
8158 globals->sgot->contents + cur_off);
8159
8160 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8161 globals->srelgot->reloc_count++;
8162 loc += RELOC_SIZE (globals);
8163 }
8164 else
8165 bfd_put_32 (output_bfd, tpoff (info, value),
8166 globals->sgot->contents + cur_off);
8167 cur_off += 4;
8168 }
8169
8170 if (h != NULL)
8171 h->got.offset |= 1;
8172 else
8173 local_got_offsets[r_symndx] |= 1;
8174 }
8175
8176 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8177 off += 8;
8178 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8179 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8180
8181 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8182 contents, rel->r_offset, value,
8183 rel->r_addend);
8184 }
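
/* Sketch of the pair of GOT words written above for a general-dynamic
   TLS symbol when no dynamic relocation is needed (a static link, or a
   locally bound symbol in an executable): the module id is 1, i.e. the
   executable itself, and the second word is the symbol's offset into
   that module's TLS block.  The struct and names are illustrative, not
   BFD types.  */
struct tls_gd_got_words_sketch
{
  unsigned long dtpmod;   /* written to sgot->contents + off     */
  unsigned long dtpoff;   /* written to sgot->contents + off + 4 */
};

static struct tls_gd_got_words_sketch
tls_gd_static_entry_sketch (unsigned long value, unsigned long dtpoff_base)
{
  struct tls_gd_got_words_sketch w;
  w.dtpmod = 1;
  w.dtpoff = value - dtpoff_base;
  return w;
}
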
8185
8186 case R_ARM_TLS_LE32:
8187 if (info->shared)
8188 {
8189 (*_bfd_error_handler)
8190 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8191 input_bfd, input_section,
8192 (long) rel->r_offset, howto->name);
8193 return (bfd_reloc_status_type) FALSE;
8194 }
8195 else
8196 value = tpoff (info, value);
8197
8198 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8199 contents, rel->r_offset, value,
8200 rel->r_addend);
8201
8202 case R_ARM_V4BX:
8203 if (globals->fix_v4bx)
8204 {
8205 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8206
8207 /* Ensure that we have a BX instruction. */
8208 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8209
8210 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8211 {
8212 /* Branch to veneer. */
8213 bfd_vma glue_addr;
8214 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8215 glue_addr -= input_section->output_section->vma
8216 + input_section->output_offset
8217 + rel->r_offset + 8;
8218 insn = (insn & 0xf0000000) | 0x0a000000
8219 | ((glue_addr >> 2) & 0x00ffffff);
8220 }
8221 else
8222 {
8223 /* Preserve Rm (lowest four bits) and the condition code
8224 (highest four bits). Other bits encode MOV PC,Rm. */
8225 insn = (insn & 0xf000000f) | 0x01a0f000;
8226 }
8227
8228 bfd_put_32 (input_bfd, insn, hit_data);
8229 }
8230 return bfd_reloc_ok;
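
/* Sketch of the in-place ARMv4 rewrite performed above when no veneer
   is used: keep the condition code (bits 28-31) and Rm (bits 0-3) and
   turn "BX Rm" into "MOV PC, Rm".  Illustrative helper only.  */
static unsigned long
v4bx_to_mov_pc_sketch (unsigned long insn)
{
  /* The caller has already checked (insn & 0x0ffffff0) == 0x012fff10.  */
  return (insn & 0xf000000fUL) | 0x01a0f000UL;
}
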
8231
8232 case R_ARM_MOVW_ABS_NC:
8233 case R_ARM_MOVT_ABS:
8234 case R_ARM_MOVW_PREL_NC:
8235 case R_ARM_MOVT_PREL:
8236       /* Until we properly support segment-base-relative addressing, we
8237 	 assume the segment base to be zero, as for the group relocations.
8238 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8239 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8240 case R_ARM_MOVW_BREL_NC:
8241 case R_ARM_MOVW_BREL:
8242 case R_ARM_MOVT_BREL:
8243 {
8244 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8245
8246 if (globals->use_rel)
8247 {
8248 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8249 signed_addend = (addend ^ 0x8000) - 0x8000;
8250 }
8251
8252 value += signed_addend;
8253
8254 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8255 value -= (input_section->output_section->vma
8256 + input_section->output_offset + rel->r_offset);
8257
8258 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8259 return bfd_reloc_overflow;
8260
8261 if (sym_flags == STT_ARM_TFUNC)
8262 value |= 1;
8263
8264 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8265 || r_type == R_ARM_MOVT_BREL)
8266 value >>= 16;
8267
8268 insn &= 0xfff0f000;
8269 insn |= value & 0xfff;
8270 insn |= (value & 0xf000) << 4;
8271 bfd_put_32 (input_bfd, insn, hit_data);
8272 }
8273 return bfd_reloc_ok;
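
/* Sketch of the field insertion above: an ARM MOVW/MOVT instruction
   carries a 16-bit immediate split into imm4 (bits 16-19) and imm12
   (bits 0-11).  Illustrative helper only.  */
static unsigned long
arm_movw_movt_insert_sketch (unsigned long insn, unsigned long imm16)
{
  insn &= 0xfff0f000UL;
  insn |= imm16 & 0xfff;           /* imm12 */
  insn |= (imm16 & 0xf000) << 4;   /* imm4  */
  return insn;
}
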
8274
8275 case R_ARM_THM_MOVW_ABS_NC:
8276 case R_ARM_THM_MOVT_ABS:
8277 case R_ARM_THM_MOVW_PREL_NC:
8278 case R_ARM_THM_MOVT_PREL:
8279       /* Until we properly support segment-base-relative addressing, we
8280 	 assume the segment base to be zero, as for the above relocations.
8281 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8282 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8283 as R_ARM_THM_MOVT_ABS. */
8284 case R_ARM_THM_MOVW_BREL_NC:
8285 case R_ARM_THM_MOVW_BREL:
8286 case R_ARM_THM_MOVT_BREL:
8287 {
8288 bfd_vma insn;
8289
8290 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8291 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8292
8293 if (globals->use_rel)
8294 {
8295 addend = ((insn >> 4) & 0xf000)
8296 | ((insn >> 15) & 0x0800)
8297 | ((insn >> 4) & 0x0700)
8298 | (insn & 0x00ff);
8299 signed_addend = (addend ^ 0x8000) - 0x8000;
8300 }
8301
8302 value += signed_addend;
8303
8304 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8305 value -= (input_section->output_section->vma
8306 + input_section->output_offset + rel->r_offset);
8307
8308 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8309 return bfd_reloc_overflow;
8310
8311 if (sym_flags == STT_ARM_TFUNC)
8312 value |= 1;
8313
8314 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8315 || r_type == R_ARM_THM_MOVT_BREL)
8316 value >>= 16;
8317
8318 insn &= 0xfbf08f00;
8319 insn |= (value & 0xf000) << 4;
8320 insn |= (value & 0x0800) << 15;
8321 insn |= (value & 0x0700) << 4;
8322 insn |= (value & 0x00ff);
8323
8324 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8325 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8326 }
8327 return bfd_reloc_ok;
8328
8329 case R_ARM_ALU_PC_G0_NC:
8330 case R_ARM_ALU_PC_G1_NC:
8331 case R_ARM_ALU_PC_G0:
8332 case R_ARM_ALU_PC_G1:
8333 case R_ARM_ALU_PC_G2:
8334 case R_ARM_ALU_SB_G0_NC:
8335 case R_ARM_ALU_SB_G1_NC:
8336 case R_ARM_ALU_SB_G0:
8337 case R_ARM_ALU_SB_G1:
8338 case R_ARM_ALU_SB_G2:
8339 {
8340 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8341 bfd_vma pc = input_section->output_section->vma
8342 + input_section->output_offset + rel->r_offset;
8343 /* sb should be the origin of the *segment* containing the symbol.
8344 It is not clear how to obtain this OS-dependent value, so we
8345 make an arbitrary choice of zero. */
8346 bfd_vma sb = 0;
8347 bfd_vma residual;
8348 bfd_vma g_n;
8349 bfd_signed_vma signed_value;
8350 int group = 0;
8351
8352 /* Determine which group of bits to select. */
8353 switch (r_type)
8354 {
8355 case R_ARM_ALU_PC_G0_NC:
8356 case R_ARM_ALU_PC_G0:
8357 case R_ARM_ALU_SB_G0_NC:
8358 case R_ARM_ALU_SB_G0:
8359 group = 0;
8360 break;
8361
8362 case R_ARM_ALU_PC_G1_NC:
8363 case R_ARM_ALU_PC_G1:
8364 case R_ARM_ALU_SB_G1_NC:
8365 case R_ARM_ALU_SB_G1:
8366 group = 1;
8367 break;
8368
8369 case R_ARM_ALU_PC_G2:
8370 case R_ARM_ALU_SB_G2:
8371 group = 2;
8372 break;
8373
8374 default:
8375 abort ();
8376 }
8377
8378 /* If REL, extract the addend from the insn. If RELA, it will
8379 have already been fetched for us. */
8380 if (globals->use_rel)
8381 {
8382 int negative;
8383 bfd_vma constant = insn & 0xff;
8384 bfd_vma rotation = (insn & 0xf00) >> 8;
8385
8386 if (rotation == 0)
8387 signed_addend = constant;
8388 else
8389 {
8390 /* Compensate for the fact that in the instruction, the
8391 rotation is stored in multiples of 2 bits. */
8392 rotation *= 2;
8393
8394 /* Rotate "constant" right by "rotation" bits. */
8395 signed_addend = (constant >> rotation) |
8396 (constant << (8 * sizeof (bfd_vma) - rotation));
8397 }
8398
8399 /* Determine if the instruction is an ADD or a SUB.
8400 (For REL, this determines the sign of the addend.) */
8401 negative = identify_add_or_sub (insn);
8402 if (negative == 0)
8403 {
8404 (*_bfd_error_handler)
8405 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8406 input_bfd, input_section,
8407 (long) rel->r_offset, howto->name);
8408 return bfd_reloc_overflow;
8409 }
8410
8411 signed_addend *= negative;
8412 }
8413
8414 /* Compute the value (X) to go in the place. */
8415 if (r_type == R_ARM_ALU_PC_G0_NC
8416 || r_type == R_ARM_ALU_PC_G1_NC
8417 || r_type == R_ARM_ALU_PC_G0
8418 || r_type == R_ARM_ALU_PC_G1
8419 || r_type == R_ARM_ALU_PC_G2)
8420 /* PC relative. */
8421 signed_value = value - pc + signed_addend;
8422 else
8423 /* Section base relative. */
8424 signed_value = value - sb + signed_addend;
8425
8426 /* If the target symbol is a Thumb function, then set the
8427 Thumb bit in the address. */
8428 if (sym_flags == STT_ARM_TFUNC)
8429 signed_value |= 1;
8430
8431 /* Calculate the value of the relevant G_n, in encoded
8432 constant-with-rotation format. */
8433 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8434 &residual);
8435
8436 /* Check for overflow if required. */
8437 if ((r_type == R_ARM_ALU_PC_G0
8438 || r_type == R_ARM_ALU_PC_G1
8439 || r_type == R_ARM_ALU_PC_G2
8440 || r_type == R_ARM_ALU_SB_G0
8441 || r_type == R_ARM_ALU_SB_G1
8442 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8443 {
8444 (*_bfd_error_handler)
8445 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8446 input_bfd, input_section,
8447 (long) rel->r_offset, abs (signed_value), howto->name);
8448 return bfd_reloc_overflow;
8449 }
8450
8451 /* Mask out the value and the ADD/SUB part of the opcode; take care
8452 not to destroy the S bit. */
8453 insn &= 0xff1ff000;
8454
8455 /* Set the opcode according to whether the value to go in the
8456 place is negative. */
8457 if (signed_value < 0)
8458 insn |= 1 << 22;
8459 else
8460 insn |= 1 << 23;
8461
8462 /* Encode the offset. */
8463 insn |= g_n;
8464
8465 bfd_put_32 (input_bfd, insn, hit_data);
8466 }
8467 return bfd_reloc_ok;
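
/* Sketch of the REL addend extraction above: an ARM data-processing
   immediate is an 8-bit constant rotated right, within 32 bits, by
   twice the 4-bit rotation field.  Illustrative helper; the final mask
   keeps the result to 32 bits even if unsigned long is wider.  */
static unsigned long
arm_alu_immediate_decode_sketch (unsigned long insn)
{
  unsigned long constant = insn & 0xff;
  unsigned long rotation = ((insn >> 8) & 0xf) * 2;

  if (rotation == 0)
    return constant;
  return ((constant >> rotation) | (constant << (32 - rotation)))
         & 0xffffffffUL;
}
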
8468
8469 case R_ARM_LDR_PC_G0:
8470 case R_ARM_LDR_PC_G1:
8471 case R_ARM_LDR_PC_G2:
8472 case R_ARM_LDR_SB_G0:
8473 case R_ARM_LDR_SB_G1:
8474 case R_ARM_LDR_SB_G2:
8475 {
8476 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8477 bfd_vma pc = input_section->output_section->vma
8478 + input_section->output_offset + rel->r_offset;
8479 bfd_vma sb = 0; /* See note above. */
8480 bfd_vma residual;
8481 bfd_signed_vma signed_value;
8482 int group = 0;
8483
8484 /* Determine which groups of bits to calculate. */
8485 switch (r_type)
8486 {
8487 case R_ARM_LDR_PC_G0:
8488 case R_ARM_LDR_SB_G0:
8489 group = 0;
8490 break;
8491
8492 case R_ARM_LDR_PC_G1:
8493 case R_ARM_LDR_SB_G1:
8494 group = 1;
8495 break;
8496
8497 case R_ARM_LDR_PC_G2:
8498 case R_ARM_LDR_SB_G2:
8499 group = 2;
8500 break;
8501
8502 default:
8503 abort ();
8504 }
8505
8506 /* If REL, extract the addend from the insn. If RELA, it will
8507 have already been fetched for us. */
8508 if (globals->use_rel)
8509 {
8510 int negative = (insn & (1 << 23)) ? 1 : -1;
8511 signed_addend = negative * (insn & 0xfff);
8512 }
8513
8514 /* Compute the value (X) to go in the place. */
8515 if (r_type == R_ARM_LDR_PC_G0
8516 || r_type == R_ARM_LDR_PC_G1
8517 || r_type == R_ARM_LDR_PC_G2)
8518 /* PC relative. */
8519 signed_value = value - pc + signed_addend;
8520 else
8521 /* Section base relative. */
8522 signed_value = value - sb + signed_addend;
8523
8524 /* Calculate the value of the relevant G_{n-1} to obtain
8525 the residual at that stage. */
8526 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8527
8528 /* Check for overflow. */
8529 if (residual >= 0x1000)
8530 {
8531 (*_bfd_error_handler)
8532 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8533 input_bfd, input_section,
8534 (long) rel->r_offset, abs (signed_value), howto->name);
8535 return bfd_reloc_overflow;
8536 }
8537
8538 /* Mask out the value and U bit. */
8539 insn &= 0xff7ff000;
8540
8541 /* Set the U bit if the value to go in the place is non-negative. */
8542 if (signed_value >= 0)
8543 insn |= 1 << 23;
8544
8545 /* Encode the offset. */
8546 insn |= residual;
8547
8548 bfd_put_32 (input_bfd, insn, hit_data);
8549 }
8550 return bfd_reloc_ok;
8551
8552 case R_ARM_LDRS_PC_G0:
8553 case R_ARM_LDRS_PC_G1:
8554 case R_ARM_LDRS_PC_G2:
8555 case R_ARM_LDRS_SB_G0:
8556 case R_ARM_LDRS_SB_G1:
8557 case R_ARM_LDRS_SB_G2:
8558 {
8559 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8560 bfd_vma pc = input_section->output_section->vma
8561 + input_section->output_offset + rel->r_offset;
8562 bfd_vma sb = 0; /* See note above. */
8563 bfd_vma residual;
8564 bfd_signed_vma signed_value;
8565 int group = 0;
8566
8567 /* Determine which groups of bits to calculate. */
8568 switch (r_type)
8569 {
8570 case R_ARM_LDRS_PC_G0:
8571 case R_ARM_LDRS_SB_G0:
8572 group = 0;
8573 break;
8574
8575 case R_ARM_LDRS_PC_G1:
8576 case R_ARM_LDRS_SB_G1:
8577 group = 1;
8578 break;
8579
8580 case R_ARM_LDRS_PC_G2:
8581 case R_ARM_LDRS_SB_G2:
8582 group = 2;
8583 break;
8584
8585 default:
8586 abort ();
8587 }
8588
8589 /* If REL, extract the addend from the insn. If RELA, it will
8590 have already been fetched for us. */
8591 if (globals->use_rel)
8592 {
8593 int negative = (insn & (1 << 23)) ? 1 : -1;
8594 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8595 }
8596
8597 /* Compute the value (X) to go in the place. */
8598 if (r_type == R_ARM_LDRS_PC_G0
8599 || r_type == R_ARM_LDRS_PC_G1
8600 || r_type == R_ARM_LDRS_PC_G2)
8601 /* PC relative. */
8602 signed_value = value - pc + signed_addend;
8603 else
8604 /* Section base relative. */
8605 signed_value = value - sb + signed_addend;
8606
8607 /* Calculate the value of the relevant G_{n-1} to obtain
8608 the residual at that stage. */
8609 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8610
8611 /* Check for overflow. */
8612 if (residual >= 0x100)
8613 {
8614 (*_bfd_error_handler)
8615 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8616 input_bfd, input_section,
8617 (long) rel->r_offset, abs (signed_value), howto->name);
8618 return bfd_reloc_overflow;
8619 }
8620
8621 /* Mask out the value and U bit. */
8622 insn &= 0xff7ff0f0;
8623
8624 /* Set the U bit if the value to go in the place is non-negative. */
8625 if (signed_value >= 0)
8626 insn |= 1 << 23;
8627
8628 /* Encode the offset. */
8629 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8630
8631 bfd_put_32 (input_bfd, insn, hit_data);
8632 }
8633 return bfd_reloc_ok;
8634
8635 case R_ARM_LDC_PC_G0:
8636 case R_ARM_LDC_PC_G1:
8637 case R_ARM_LDC_PC_G2:
8638 case R_ARM_LDC_SB_G0:
8639 case R_ARM_LDC_SB_G1:
8640 case R_ARM_LDC_SB_G2:
8641 {
8642 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8643 bfd_vma pc = input_section->output_section->vma
8644 + input_section->output_offset + rel->r_offset;
8645 bfd_vma sb = 0; /* See note above. */
8646 bfd_vma residual;
8647 bfd_signed_vma signed_value;
8648 int group = 0;
8649
8650 /* Determine which groups of bits to calculate. */
8651 switch (r_type)
8652 {
8653 case R_ARM_LDC_PC_G0:
8654 case R_ARM_LDC_SB_G0:
8655 group = 0;
8656 break;
8657
8658 case R_ARM_LDC_PC_G1:
8659 case R_ARM_LDC_SB_G1:
8660 group = 1;
8661 break;
8662
8663 case R_ARM_LDC_PC_G2:
8664 case R_ARM_LDC_SB_G2:
8665 group = 2;
8666 break;
8667
8668 default:
8669 abort ();
8670 }
8671
8672 /* If REL, extract the addend from the insn. If RELA, it will
8673 have already been fetched for us. */
8674 if (globals->use_rel)
8675 {
8676 int negative = (insn & (1 << 23)) ? 1 : -1;
8677 signed_addend = negative * ((insn & 0xff) << 2);
8678 }
8679
8680 /* Compute the value (X) to go in the place. */
8681 if (r_type == R_ARM_LDC_PC_G0
8682 || r_type == R_ARM_LDC_PC_G1
8683 || r_type == R_ARM_LDC_PC_G2)
8684 /* PC relative. */
8685 signed_value = value - pc + signed_addend;
8686 else
8687 /* Section base relative. */
8688 signed_value = value - sb + signed_addend;
8689
8690 /* Calculate the value of the relevant G_{n-1} to obtain
8691 the residual at that stage. */
8692 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8693
8694 /* Check for overflow. (The absolute value to go in the place must be
8695 divisible by four and, after having been divided by four, must
8696 fit in eight bits.) */
8697 if ((residual & 0x3) != 0 || residual >= 0x400)
8698 {
8699 (*_bfd_error_handler)
8700 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8701 input_bfd, input_section,
8702 (long) rel->r_offset, abs (signed_value), howto->name);
8703 return bfd_reloc_overflow;
8704 }
8705
8706 /* Mask out the value and U bit. */
8707 insn &= 0xff7fff00;
8708
8709 /* Set the U bit if the value to go in the place is non-negative. */
8710 if (signed_value >= 0)
8711 insn |= 1 << 23;
8712
8713 /* Encode the offset. */
8714 insn |= residual >> 2;
8715
8716 bfd_put_32 (input_bfd, insn, hit_data);
8717 }
8718 return bfd_reloc_ok;
8719
8720 default:
8721 return bfd_reloc_notsupported;
8722 }
8723 }
8724
8725 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8726 static void
8727 arm_add_to_rel (bfd * abfd,
8728 bfd_byte * address,
8729 reloc_howto_type * howto,
8730 bfd_signed_vma increment)
8731 {
8732 bfd_signed_vma addend;
8733
8734 if (howto->type == R_ARM_THM_CALL
8735 || howto->type == R_ARM_THM_JUMP24)
8736 {
8737 int upper_insn, lower_insn;
8738 int upper, lower;
8739
8740 upper_insn = bfd_get_16 (abfd, address);
8741 lower_insn = bfd_get_16 (abfd, address + 2);
8742 upper = upper_insn & 0x7ff;
8743 lower = lower_insn & 0x7ff;
8744
8745 addend = (upper << 12) | (lower << 1);
8746 addend += increment;
8747 addend >>= 1;
8748
8749 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8750 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8751
8752 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8753 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8754 }
8755 else
8756 {
8757 bfd_vma contents;
8758
8759 contents = bfd_get_32 (abfd, address);
8760
8761 /* Get the (signed) value from the instruction. */
8762 addend = contents & howto->src_mask;
8763 if (addend & ((howto->src_mask + 1) >> 1))
8764 {
8765 bfd_signed_vma mask;
8766
8767 mask = -1;
8768 mask &= ~ howto->src_mask;
8769 addend |= mask;
8770 }
8771
8772       /* Add in the increment (which is a byte value). */
8773 switch (howto->type)
8774 {
8775 default:
8776 addend += increment;
8777 break;
8778
8779 case R_ARM_PC24:
8780 case R_ARM_PLT32:
8781 case R_ARM_CALL:
8782 case R_ARM_JUMP24:
8783 addend <<= howto->size;
8784 addend += increment;
8785
8786 /* Should we check for overflow here ? */
8787
8788 /* Drop any undesired bits. */
8789 addend >>= howto->rightshift;
8790 break;
8791 }
8792
8793 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8794
8795 bfd_put_32 (abfd, contents, address);
8796 }
8797 }
8798
8799 #define IS_ARM_TLS_RELOC(R_TYPE) \
8800 ((R_TYPE) == R_ARM_TLS_GD32 \
8801 || (R_TYPE) == R_ARM_TLS_LDO32 \
8802 || (R_TYPE) == R_ARM_TLS_LDM32 \
8803 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8804 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8805 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8806 || (R_TYPE) == R_ARM_TLS_LE32 \
8807 || (R_TYPE) == R_ARM_TLS_IE32)
8808
8809 /* Relocate an ARM ELF section. */
8810
8811 static bfd_boolean
8812 elf32_arm_relocate_section (bfd * output_bfd,
8813 struct bfd_link_info * info,
8814 bfd * input_bfd,
8815 asection * input_section,
8816 bfd_byte * contents,
8817 Elf_Internal_Rela * relocs,
8818 Elf_Internal_Sym * local_syms,
8819 asection ** local_sections)
8820 {
8821 Elf_Internal_Shdr *symtab_hdr;
8822 struct elf_link_hash_entry **sym_hashes;
8823 Elf_Internal_Rela *rel;
8824 Elf_Internal_Rela *relend;
8825 const char *name;
8826 struct elf32_arm_link_hash_table * globals;
8827
8828 globals = elf32_arm_hash_table (info);
8829 if (globals == NULL)
8830 return FALSE;
8831
8832 symtab_hdr = & elf_symtab_hdr (input_bfd);
8833 sym_hashes = elf_sym_hashes (input_bfd);
8834
8835 rel = relocs;
8836 relend = relocs + input_section->reloc_count;
8837 for (; rel < relend; rel++)
8838 {
8839 int r_type;
8840 reloc_howto_type * howto;
8841 unsigned long r_symndx;
8842 Elf_Internal_Sym * sym;
8843 asection * sec;
8844 struct elf_link_hash_entry * h;
8845 bfd_vma relocation;
8846 bfd_reloc_status_type r;
8847 arelent bfd_reloc;
8848 char sym_type;
8849 bfd_boolean unresolved_reloc = FALSE;
8850 char *error_message = NULL;
8851
8852 r_symndx = ELF32_R_SYM (rel->r_info);
8853 r_type = ELF32_R_TYPE (rel->r_info);
8854 r_type = arm_real_reloc_type (globals, r_type);
8855
8856 if ( r_type == R_ARM_GNU_VTENTRY
8857 || r_type == R_ARM_GNU_VTINHERIT)
8858 continue;
8859
8860 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8861 howto = bfd_reloc.howto;
8862
8863 h = NULL;
8864 sym = NULL;
8865 sec = NULL;
8866
8867 if (r_symndx < symtab_hdr->sh_info)
8868 {
8869 sym = local_syms + r_symndx;
8870 sym_type = ELF32_ST_TYPE (sym->st_info);
8871 sec = local_sections[r_symndx];
8872
8873 /* An object file might have a reference to a local
8874 undefined symbol. This is a daft object file, but we
8875 should at least do something about it. V4BX & NONE
8876 relocations do not use the symbol and are explicitly
8877 allowed to use the undefined symbol, so allow those. */
8878 if (r_type != R_ARM_V4BX
8879 && r_type != R_ARM_NONE
8880 && bfd_is_und_section (sec)
8881 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8882 {
8883 if (!info->callbacks->undefined_symbol
8884 (info, bfd_elf_string_from_elf_section
8885 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8886 input_bfd, input_section,
8887 rel->r_offset, TRUE))
8888 return FALSE;
8889 }
8890
8891 if (globals->use_rel)
8892 {
8893 relocation = (sec->output_section->vma
8894 + sec->output_offset
8895 + sym->st_value);
8896 if (!info->relocatable
8897 && (sec->flags & SEC_MERGE)
8898 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8899 {
8900 asection *msec;
8901 bfd_vma addend, value;
8902
8903 switch (r_type)
8904 {
8905 case R_ARM_MOVW_ABS_NC:
8906 case R_ARM_MOVT_ABS:
8907 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8908 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8909 addend = (addend ^ 0x8000) - 0x8000;
8910 break;
8911
8912 case R_ARM_THM_MOVW_ABS_NC:
8913 case R_ARM_THM_MOVT_ABS:
8914 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8915 << 16;
8916 value |= bfd_get_16 (input_bfd,
8917 contents + rel->r_offset + 2);
8918 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8919 | ((value & 0x04000000) >> 15);
8920 addend = (addend ^ 0x8000) - 0x8000;
8921 break;
8922
8923 default:
8924 if (howto->rightshift
8925 || (howto->src_mask & (howto->src_mask + 1)))
8926 {
8927 (*_bfd_error_handler)
8928 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8929 input_bfd, input_section,
8930 (long) rel->r_offset, howto->name);
8931 return FALSE;
8932 }
8933
8934 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8935
8936 /* Get the (signed) value from the instruction. */
8937 addend = value & howto->src_mask;
8938 if (addend & ((howto->src_mask + 1) >> 1))
8939 {
8940 bfd_signed_vma mask;
8941
8942 mask = -1;
8943 mask &= ~ howto->src_mask;
8944 addend |= mask;
8945 }
8946 break;
8947 }
8948
8949 msec = sec;
8950 addend =
8951 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8952 - relocation;
8953 addend += msec->output_section->vma + msec->output_offset;
8954
8955 		  /* Cases here must match those in the preceding
8956 switch statement. */
8957 switch (r_type)
8958 {
8959 case R_ARM_MOVW_ABS_NC:
8960 case R_ARM_MOVT_ABS:
8961 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8962 | (addend & 0xfff);
8963 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8964 break;
8965
8966 case R_ARM_THM_MOVW_ABS_NC:
8967 case R_ARM_THM_MOVT_ABS:
8968 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8969 | (addend & 0xff) | ((addend & 0x0800) << 15);
8970 bfd_put_16 (input_bfd, value >> 16,
8971 contents + rel->r_offset);
8972 bfd_put_16 (input_bfd, value,
8973 contents + rel->r_offset + 2);
8974 break;
8975
8976 default:
8977 value = (value & ~ howto->dst_mask)
8978 | (addend & howto->dst_mask);
8979 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8980 break;
8981 }
8982 }
8983 }
8984 else
8985 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8986 }
8987 else
8988 {
8989 bfd_boolean warned;
8990
8991 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8992 r_symndx, symtab_hdr, sym_hashes,
8993 h, sec, relocation,
8994 unresolved_reloc, warned);
8995
8996 sym_type = h->type;
8997 }
8998
8999 if (sec != NULL && elf_discarded_section (sec))
9000 {
9001 /* For relocs against symbols from removed linkonce sections,
9002 or sections discarded by a linker script, we just want the
9003 section contents zeroed. Avoid any special processing. */
9004 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
9005 rel->r_info = 0;
9006 rel->r_addend = 0;
9007 continue;
9008 }
9009
9010 if (info->relocatable)
9011 {
9012 /* This is a relocatable link. We don't have to change
9013 anything, unless the reloc is against a section symbol,
9014 in which case we have to adjust according to where the
9015 section symbol winds up in the output section. */
9016 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
9017 {
9018 if (globals->use_rel)
9019 arm_add_to_rel (input_bfd, contents + rel->r_offset,
9020 howto, (bfd_signed_vma) sec->output_offset);
9021 else
9022 rel->r_addend += sec->output_offset;
9023 }
9024 continue;
9025 }
9026
9027 if (h != NULL)
9028 name = h->root.root.string;
9029 else
9030 {
9031 name = (bfd_elf_string_from_elf_section
9032 (input_bfd, symtab_hdr->sh_link, sym->st_name));
9033 if (name == NULL || *name == '\0')
9034 name = bfd_section_name (input_bfd, sec);
9035 }
9036
9037 if (r_symndx != 0
9038 && r_type != R_ARM_NONE
9039 && (h == NULL
9040 || h->root.type == bfd_link_hash_defined
9041 || h->root.type == bfd_link_hash_defweak)
9042 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9043 {
9044 (*_bfd_error_handler)
9045 ((sym_type == STT_TLS
9046 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9047 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9048 input_bfd,
9049 input_section,
9050 (long) rel->r_offset,
9051 howto->name,
9052 name);
9053 }
9054
9055 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9056 input_section, contents, rel,
9057 relocation, info, sec, name,
9058 (h ? ELF_ST_TYPE (h->type) :
9059 ELF_ST_TYPE (sym->st_info)), h,
9060 &unresolved_reloc, &error_message);
9061
9062 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9063 because such sections are not SEC_ALLOC and thus ld.so will
9064 not process them. */
9065 if (unresolved_reloc
9066 && !((input_section->flags & SEC_DEBUGGING) != 0
9067 && h->def_dynamic))
9068 {
9069 (*_bfd_error_handler)
9070 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9071 input_bfd,
9072 input_section,
9073 (long) rel->r_offset,
9074 howto->name,
9075 h->root.root.string);
9076 return FALSE;
9077 }
9078
9079 if (r != bfd_reloc_ok)
9080 {
9081 switch (r)
9082 {
9083 case bfd_reloc_overflow:
9084 /* If the overflowing reloc was to an undefined symbol,
9085 we have already printed one error message and there
9086 is no point complaining again. */
9087 if ((! h ||
9088 h->root.type != bfd_link_hash_undefined)
9089 && (!((*info->callbacks->reloc_overflow)
9090 (info, (h ? &h->root : NULL), name, howto->name,
9091 (bfd_vma) 0, input_bfd, input_section,
9092 rel->r_offset))))
9093 return FALSE;
9094 break;
9095
9096 case bfd_reloc_undefined:
9097 if (!((*info->callbacks->undefined_symbol)
9098 (info, name, input_bfd, input_section,
9099 rel->r_offset, TRUE)))
9100 return FALSE;
9101 break;
9102
9103 case bfd_reloc_outofrange:
9104 error_message = _("out of range");
9105 goto common_error;
9106
9107 case bfd_reloc_notsupported:
9108 error_message = _("unsupported relocation");
9109 goto common_error;
9110
9111 case bfd_reloc_dangerous:
9112 /* error_message should already be set. */
9113 goto common_error;
9114
9115 default:
9116 error_message = _("unknown error");
9117 /* Fall through. */
9118
9119 common_error:
9120 BFD_ASSERT (error_message != NULL);
9121 if (!((*info->callbacks->reloc_dangerous)
9122 (info, error_message, input_bfd, input_section,
9123 rel->r_offset)))
9124 return FALSE;
9125 break;
9126 }
9127 }
9128 }
9129
9130 return TRUE;
9131 }
9132
9133 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9134 adds the edit to the start of the list. (The list must be built in order of
9135 ascending TINDEX: the function's callers are primarily responsible for
9136 maintaining that condition). */
9137
9138 static void
9139 add_unwind_table_edit (arm_unwind_table_edit **head,
9140 arm_unwind_table_edit **tail,
9141 arm_unwind_edit_type type,
9142 asection *linked_section,
9143 unsigned int tindex)
9144 {
9145 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9146 xmalloc (sizeof (arm_unwind_table_edit));
9147
9148 new_edit->type = type;
9149 new_edit->linked_section = linked_section;
9150 new_edit->index = tindex;
9151
9152 if (tindex > 0)
9153 {
9154 new_edit->next = NULL;
9155
9156 if (*tail)
9157 (*tail)->next = new_edit;
9158
9159 (*tail) = new_edit;
9160
9161 if (!*head)
9162 (*head) = new_edit;
9163 }
9164 else
9165 {
9166 new_edit->next = *head;
9167
9168 if (!*tail)
9169 *tail = new_edit;
9170
9171 *head = new_edit;
9172 }
9173 }
9174
9175 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9176
9177 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9178 static void
9179 adjust_exidx_size(asection *exidx_sec, int adjust)
9180 {
9181 asection *out_sec;
9182
9183 if (!exidx_sec->rawsize)
9184 exidx_sec->rawsize = exidx_sec->size;
9185
9186 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9187 out_sec = exidx_sec->output_section;
9188 /* Adjust size of output section. */
9189   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9190 }
9191
9192 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9193 static void
9194 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9195 {
9196 struct _arm_elf_section_data *exidx_arm_data;
9197
9198 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9199 add_unwind_table_edit (
9200 &exidx_arm_data->u.exidx.unwind_edit_list,
9201 &exidx_arm_data->u.exidx.unwind_edit_tail,
9202 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9203
9204 adjust_exidx_size(exidx_sec, 8);
9205 }
9206
9207 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9208 made to those tables, such that:
9209
9210 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9211 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9212 codes which have been inlined into the index).
9213
9214 The edits are applied when the tables are written
9215 (in elf32_arm_write_section).
9216 */
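
/* As an illustration, if two consecutive text sections both use the same
   inlined unwind word (say, 0x80B0B0B0), the second index entry adds nothing
   and is queued for deletion; conversely, when a text section with no unwind
   data follows one that has it, an EXIDX_CANTUNWIND entry is inserted so the
   preceding entry does not appear to cover that code. The classification
   used below is: a second word of 1 marks EXIDX_CANTUNWIND, a set top bit
   marks inlined unwind opcodes, and anything else refers to an external
   table entry. */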
9217
9218 bfd_boolean
9219 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9220 unsigned int num_text_sections,
9221 struct bfd_link_info *info)
9222 {
9223 bfd *inp;
9224 unsigned int last_second_word = 0, i;
9225 asection *last_exidx_sec = NULL;
9226 asection *last_text_sec = NULL;
9227 int last_unwind_type = -1;
9228
9229 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9230 text sections. */
9231 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9232 {
9233 asection *sec;
9234
9235 for (sec = inp->sections; sec != NULL; sec = sec->next)
9236 {
9237 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9238 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9239
9240 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9241 continue;
9242
9243 if (elf_sec->linked_to)
9244 {
9245 Elf_Internal_Shdr *linked_hdr
9246 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9247 struct _arm_elf_section_data *linked_sec_arm_data
9248 = get_arm_elf_section_data (linked_hdr->bfd_section);
9249
9250 if (linked_sec_arm_data == NULL)
9251 continue;
9252
9253 /* Link this .ARM.exidx section back from the text section it
9254 describes. */
9255 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9256 }
9257 }
9258 }
9259
9260 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9261 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9262 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9263
9264 for (i = 0; i < num_text_sections; i++)
9265 {
9266 asection *sec = text_section_order[i];
9267 asection *exidx_sec;
9268 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9269 struct _arm_elf_section_data *exidx_arm_data;
9270 bfd_byte *contents = NULL;
9271 int deleted_exidx_bytes = 0;
9272 bfd_vma j;
9273 arm_unwind_table_edit *unwind_edit_head = NULL;
9274 arm_unwind_table_edit *unwind_edit_tail = NULL;
9275 Elf_Internal_Shdr *hdr;
9276 bfd *ibfd;
9277
9278 if (arm_data == NULL)
9279 continue;
9280
9281 exidx_sec = arm_data->u.text.arm_exidx_sec;
9282 if (exidx_sec == NULL)
9283 {
9284 /* Section has no unwind data. */
9285 if (last_unwind_type == 0 || !last_exidx_sec)
9286 continue;
9287
9288 /* Ignore zero sized sections. */
9289 if (sec->size == 0)
9290 continue;
9291
9292 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9293 last_unwind_type = 0;
9294 continue;
9295 }
9296
9297 /* Skip /DISCARD/ sections. */
9298 if (bfd_is_abs_section (exidx_sec->output_section))
9299 continue;
9300
9301 hdr = &elf_section_data (exidx_sec)->this_hdr;
9302 if (hdr->sh_type != SHT_ARM_EXIDX)
9303 continue;
9304
9305 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9306 if (exidx_arm_data == NULL)
9307 continue;
9308
9309 ibfd = exidx_sec->owner;
9310
9311 if (hdr->contents != NULL)
9312 contents = hdr->contents;
9313 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9314 /* An error? */
9315 continue;
9316
9317 for (j = 0; j < hdr->sh_size; j += 8)
9318 {
9319 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9320 int unwind_type;
9321 int elide = 0;
9322
9323 /* An EXIDX_CANTUNWIND entry. */
9324 if (second_word == 1)
9325 {
9326 if (last_unwind_type == 0)
9327 elide = 1;
9328 unwind_type = 0;
9329 }
9330 /* Inlined unwinding data. Merge if equal to previous. */
9331 else if ((second_word & 0x80000000) != 0)
9332 {
9333 if (last_second_word == second_word && last_unwind_type == 1)
9334 elide = 1;
9335 unwind_type = 1;
9336 last_second_word = second_word;
9337 }
9338 /* Normal table entry. In theory we could merge these too,
9339 but duplicate entries are likely to be much less common. */
9340 else
9341 unwind_type = 2;
9342
9343 if (elide)
9344 {
9345 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9346 DELETE_EXIDX_ENTRY, NULL, j / 8);
9347
9348 deleted_exidx_bytes += 8;
9349 }
9350
9351 last_unwind_type = unwind_type;
9352 }
9353
9354 /* Free contents if we allocated it ourselves. */
9355 if (contents != hdr->contents)
9356 free (contents);
9357
9358 /* Record edits to be applied later (in elf32_arm_write_section). */
9359 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9360 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9361
9362 if (deleted_exidx_bytes > 0)
9363 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9364
9365 last_exidx_sec = exidx_sec;
9366 last_text_sec = sec;
9367 }
9368
9369 /* Add terminating CANTUNWIND entry. */
9370 if (last_exidx_sec && last_unwind_type != 0)
9371 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9372
9373 return TRUE;
9374 }
9375
9376 static bfd_boolean
9377 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9378 bfd *ibfd, const char *name)
9379 {
9380 asection *sec, *osec;
9381
9382 sec = bfd_get_section_by_name (ibfd, name);
9383 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9384 return TRUE;
9385
9386 osec = sec->output_section;
9387 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9388 return TRUE;
9389
9390 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9391 sec->output_offset, sec->size))
9392 return FALSE;
9393
9394 return TRUE;
9395 }
9396
9397 static bfd_boolean
9398 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9399 {
9400 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9401 asection *sec, *osec;
9402
9403 if (globals == NULL)
9404 return FALSE;
9405
9406 /* Invoke the regular ELF backend linker to do all the work. */
9407 if (!bfd_elf_final_link (abfd, info))
9408 return FALSE;
9409
9410 /* Process stub sections (e.g. BE8 encoding, ...). */
9411 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
9412 int i;
9413 for (i = 0; i < htab->top_id; i++) {
9414 sec = htab->stub_group[i].stub_sec;
9415 if (sec) {
9416 osec = sec->output_section;
9417 elf32_arm_write_section (abfd, info, sec, sec->contents);
9418 if (! bfd_set_section_contents (abfd, osec, sec->contents,
9419 sec->output_offset, sec->size))
9420 return FALSE;
9421 }
9422 }
9423
9424 /* Write out any glue sections now that we have created all the
9425 stubs. */
9426 if (globals->bfd_of_glue_owner != NULL)
9427 {
9428 if (! elf32_arm_output_glue_section (info, abfd,
9429 globals->bfd_of_glue_owner,
9430 ARM2THUMB_GLUE_SECTION_NAME))
9431 return FALSE;
9432
9433 if (! elf32_arm_output_glue_section (info, abfd,
9434 globals->bfd_of_glue_owner,
9435 THUMB2ARM_GLUE_SECTION_NAME))
9436 return FALSE;
9437
9438 if (! elf32_arm_output_glue_section (info, abfd,
9439 globals->bfd_of_glue_owner,
9440 VFP11_ERRATUM_VENEER_SECTION_NAME))
9441 return FALSE;
9442
9443 if (! elf32_arm_output_glue_section (info, abfd,
9444 globals->bfd_of_glue_owner,
9445 ARM_BX_GLUE_SECTION_NAME))
9446 return FALSE;
9447 }
9448
9449 return TRUE;
9450 }
9451
9452 /* Set the right machine number. */
9453
9454 static bfd_boolean
9455 elf32_arm_object_p (bfd *abfd)
9456 {
9457 unsigned int mach;
9458
9459 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9460
9461 if (mach != bfd_mach_arm_unknown)
9462 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9463
9464 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9465 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9466
9467 else
9468 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9469
9470 return TRUE;
9471 }
9472
9473 /* Function to keep ARM specific flags in the ELF header. */
9474
9475 static bfd_boolean
9476 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9477 {
9478 if (elf_flags_init (abfd)
9479 && elf_elfheader (abfd)->e_flags != flags)
9480 {
9481 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9482 {
9483 if (flags & EF_ARM_INTERWORK)
9484 (*_bfd_error_handler)
9485 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9486 abfd);
9487 else
9488 _bfd_error_handler
9489 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9490 abfd);
9491 }
9492 }
9493 else
9494 {
9495 elf_elfheader (abfd)->e_flags = flags;
9496 elf_flags_init (abfd) = TRUE;
9497 }
9498
9499 return TRUE;
9500 }
9501
9502 /* Copy backend specific data from one object module to another. */
9503
9504 static bfd_boolean
9505 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9506 {
9507 flagword in_flags;
9508 flagword out_flags;
9509
9510 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9511 return TRUE;
9512
9513 in_flags = elf_elfheader (ibfd)->e_flags;
9514 out_flags = elf_elfheader (obfd)->e_flags;
9515
9516 if (elf_flags_init (obfd)
9517 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9518 && in_flags != out_flags)
9519 {
9520 /* Cannot mix APCS26 and APCS32 code. */
9521 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9522 return FALSE;
9523
9524 /* Cannot mix float APCS and non-float APCS code. */
9525 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9526 return FALSE;
9527
9528 /* If the src and dest have different interworking flags
9529 then turn off the interworking bit. */
9530 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9531 {
9532 if (out_flags & EF_ARM_INTERWORK)
9533 _bfd_error_handler
9534 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9535 obfd, ibfd);
9536
9537 in_flags &= ~EF_ARM_INTERWORK;
9538 }
9539
9540 /* Likewise for PIC, though don't warn for this case. */
9541 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9542 in_flags &= ~EF_ARM_PIC;
9543 }
9544
9545 elf_elfheader (obfd)->e_flags = in_flags;
9546 elf_flags_init (obfd) = TRUE;
9547
9548 /* Also copy the EI_OSABI field. */
9549 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9550 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9551
9552 /* Copy object attributes. */
9553 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9554
9555 return TRUE;
9556 }
9557
9558 /* Values for Tag_ABI_PCS_R9_use. */
9559 enum
9560 {
9561 AEABI_R9_V6,
9562 AEABI_R9_SB,
9563 AEABI_R9_TLS,
9564 AEABI_R9_unused
9565 };
9566
9567 /* Values for Tag_ABI_PCS_RW_data. */
9568 enum
9569 {
9570 AEABI_PCS_RW_data_absolute,
9571 AEABI_PCS_RW_data_PCrel,
9572 AEABI_PCS_RW_data_SBrel,
9573 AEABI_PCS_RW_data_unused
9574 };
9575
9576 /* Values for Tag_ABI_enum_size. */
9577 enum
9578 {
9579 AEABI_enum_unused,
9580 AEABI_enum_short,
9581 AEABI_enum_wide,
9582 AEABI_enum_forced_wide
9583 };
9584
9585 /* Determine whether an object attribute tag takes an integer, a
9586 string or both. */
9587
9588 static int
9589 elf32_arm_obj_attrs_arg_type (int tag)
9590 {
9591 if (tag == Tag_compatibility)
9592 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9593 else if (tag == Tag_nodefaults)
9594 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9595 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9596 return ATTR_TYPE_FLAG_STR_VAL;
9597 else if (tag < 32)
9598 return ATTR_TYPE_FLAG_INT_VAL;
9599 else
9600 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9601 }
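
/* For example, Tag_CPU_name takes a string, Tag_CPU_arch (a tag below 32)
   takes an integer, and for higher tags the convention is that odd-numbered
   tags take strings and even-numbered tags take integers. */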
9602
9603 /* The ABI defines that Tag_conformance should be emitted first, and that
9604 Tag_nodefaults should be second (if either is defined). This sets those
9605 two positions, and bumps up the position of all the remaining tags to
9606 compensate. */
9607 static int
9608 elf32_arm_obj_attrs_order (int num)
9609 {
9610 if (num == 4)
9611 return Tag_conformance;
9612 if (num == 5)
9613 return Tag_nodefaults;
9614 if ((num - 2) < Tag_nodefaults)
9615 return num - 2;
9616 if ((num - 1) < Tag_conformance)
9617 return num - 1;
9618 return num;
9619 }
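
/* A worked example, assuming the usual numbering in which Tag_nodefaults is
   64 and Tag_conformance is 67: positions 4 and 5 map to Tag_conformance and
   Tag_nodefaults respectively, position 6 maps to tag 4 (num - 2), position
   66 maps to tag 65 (num - 1), and positions of 68 and above are returned
   unchanged. */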
9620
9621 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9622 Returns -1 if no architecture could be read. */
9623
9624 static int
9625 get_secondary_compatible_arch (bfd *abfd)
9626 {
9627 obj_attribute *attr =
9628 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9629
9630 /* Note: the tag and its argument below are uleb128 values, though
9631 currently-defined values fit in one byte for each. */
9632 if (attr->s
9633 && attr->s[0] == Tag_CPU_arch
9634 && (attr->s[1] & 128) != 128
9635 && attr->s[2] == 0)
9636 return attr->s[1];
9637
9638 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9639 return -1;
9640 }
9641
9642 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9643 The tag is removed if ARCH is -1. */
9644
9645 static void
9646 set_secondary_compatible_arch (bfd *abfd, int arch)
9647 {
9648 obj_attribute *attr =
9649 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9650
9651 if (arch == -1)
9652 {
9653 attr->s = NULL;
9654 return;
9655 }
9656
9657 /* Note: the tag and its argument below are uleb128 values, though
9658 currently-defined values fit in one byte for each. */
9659 if (!attr->s)
9660 attr->s = (char *) bfd_alloc (abfd, 3);
9661 attr->s[0] = Tag_CPU_arch;
9662 attr->s[1] = arch;
9663 attr->s[2] = '\0';
9664 }
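
/* For example, set_secondary_compatible_arch (abfd, TAG_CPU_ARCH_V6_M)
   leaves the Tag_also_compatible_with data holding the three bytes
   { Tag_CPU_arch, TAG_CPU_ARCH_V6_M, 0 }, which
   get_secondary_compatible_arch above decodes back to TAG_CPU_ARCH_V6_M. */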
9665
9666 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9667 into account. */
9668
9669 static int
9670 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9671 int newtag, int secondary_compat)
9672 {
9673 #define T(X) TAG_CPU_ARCH_##X
9674 int tagl, tagh, result;
9675 const int v6t2[] =
9676 {
9677 T(V6T2), /* PRE_V4. */
9678 T(V6T2), /* V4. */
9679 T(V6T2), /* V4T. */
9680 T(V6T2), /* V5T. */
9681 T(V6T2), /* V5TE. */
9682 T(V6T2), /* V5TEJ. */
9683 T(V6T2), /* V6. */
9684 T(V7), /* V6KZ. */
9685 T(V6T2) /* V6T2. */
9686 };
9687 const int v6k[] =
9688 {
9689 T(V6K), /* PRE_V4. */
9690 T(V6K), /* V4. */
9691 T(V6K), /* V4T. */
9692 T(V6K), /* V5T. */
9693 T(V6K), /* V5TE. */
9694 T(V6K), /* V5TEJ. */
9695 T(V6K), /* V6. */
9696 T(V6KZ), /* V6KZ. */
9697 T(V7), /* V6T2. */
9698 T(V6K) /* V6K. */
9699 };
9700 const int v7[] =
9701 {
9702 T(V7), /* PRE_V4. */
9703 T(V7), /* V4. */
9704 T(V7), /* V4T. */
9705 T(V7), /* V5T. */
9706 T(V7), /* V5TE. */
9707 T(V7), /* V5TEJ. */
9708 T(V7), /* V6. */
9709 T(V7), /* V6KZ. */
9710 T(V7), /* V6T2. */
9711 T(V7), /* V6K. */
9712 T(V7) /* V7. */
9713 };
9714 const int v6_m[] =
9715 {
9716 -1, /* PRE_V4. */
9717 -1, /* V4. */
9718 T(V6K), /* V4T. */
9719 T(V6K), /* V5T. */
9720 T(V6K), /* V5TE. */
9721 T(V6K), /* V5TEJ. */
9722 T(V6K), /* V6. */
9723 T(V6KZ), /* V6KZ. */
9724 T(V7), /* V6T2. */
9725 T(V6K), /* V6K. */
9726 T(V7), /* V7. */
9727 T(V6_M) /* V6_M. */
9728 };
9729 const int v6s_m[] =
9730 {
9731 -1, /* PRE_V4. */
9732 -1, /* V4. */
9733 T(V6K), /* V4T. */
9734 T(V6K), /* V5T. */
9735 T(V6K), /* V5TE. */
9736 T(V6K), /* V5TEJ. */
9737 T(V6K), /* V6. */
9738 T(V6KZ), /* V6KZ. */
9739 T(V7), /* V6T2. */
9740 T(V6K), /* V6K. */
9741 T(V7), /* V7. */
9742 T(V6S_M), /* V6_M. */
9743 T(V6S_M) /* V6S_M. */
9744 };
9745 const int v7e_m[] =
9746 {
9747 -1, /* PRE_V4. */
9748 -1, /* V4. */
9749 T(V7E_M), /* V4T. */
9750 T(V7E_M), /* V5T. */
9751 T(V7E_M), /* V5TE. */
9752 T(V7E_M), /* V5TEJ. */
9753 T(V7E_M), /* V6. */
9754 T(V7E_M), /* V6KZ. */
9755 T(V7E_M), /* V6T2. */
9756 T(V7E_M), /* V6K. */
9757 T(V7E_M), /* V7. */
9758 T(V7E_M), /* V6_M. */
9759 T(V7E_M), /* V6S_M. */
9760 T(V7E_M) /* V7E_M. */
9761 };
9762 const int v4t_plus_v6_m[] =
9763 {
9764 -1, /* PRE_V4. */
9765 -1, /* V4. */
9766 T(V4T), /* V4T. */
9767 T(V5T), /* V5T. */
9768 T(V5TE), /* V5TE. */
9769 T(V5TEJ), /* V5TEJ. */
9770 T(V6), /* V6. */
9771 T(V6KZ), /* V6KZ. */
9772 T(V6T2), /* V6T2. */
9773 T(V6K), /* V6K. */
9774 T(V7), /* V7. */
9775 T(V6_M), /* V6_M. */
9776 T(V6S_M), /* V6S_M. */
9777 T(V7E_M), /* V7E_M. */
9778 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9779 };
9780 const int *comb[] =
9781 {
9782 v6t2,
9783 v6k,
9784 v7,
9785 v6_m,
9786 v6s_m,
9787 v7e_m,
9788 /* Pseudo-architecture. */
9789 v4t_plus_v6_m
9790 };
9791
9792 /* Check we've not got a higher architecture than we know about. */
9793
9794 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9795 {
9796 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9797 return -1;
9798 }
9799
9800 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9801
9802 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9803 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9804 oldtag = T(V4T_PLUS_V6_M);
9805
9806 /* And override the new tag if we have a Tag_also_compatible_with on the
9807 input. */
9808
9809 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9810 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9811 newtag = T(V4T_PLUS_V6_M);
9812
9813 tagl = (oldtag < newtag) ? oldtag : newtag;
9814 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9815
9816 /* Architectures before V6KZ add features monotonically. */
9817 if (tagh <= TAG_CPU_ARCH_V6KZ)
9818 return result;
9819
9820 result = comb[tagh - T(V6T2)][tagl];
9821
9822 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9823 as the canonical version. */
9824 if (result == T(V4T_PLUS_V6_M))
9825 {
9826 result = T(V4T);
9827 *secondary_compat_out = T(V6_M);
9828 }
9829 else
9830 *secondary_compat_out = -1;
9831
9832 if (result == -1)
9833 {
9834 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9835 ibfd, oldtag, newtag);
9836 return -1;
9837 }
9838
9839 return result;
9840 #undef T
9841 }
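
/* Two worked examples: merging V5TE with V6 yields V6, since architectures
   up to V6KZ add features monotonically; merging V6K with V7E_M looks up the
   v7e_m row above at the V6K column and yields V7E_M. */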
9842
9843 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9844 are conflicting attributes. */
9845
9846 static bfd_boolean
9847 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9848 {
9849 obj_attribute *in_attr;
9850 obj_attribute *out_attr;
9851 obj_attribute_list *in_list;
9852 obj_attribute_list *out_list;
9853 obj_attribute_list **out_listp;
9854 /* Some tags have 0 = don't care, 1 = strong requirement,
9855 2 = weak requirement. */
9856 static const int order_021[3] = {0, 2, 1};
9857 int i;
9858 bfd_boolean result = TRUE;
9859
9860 /* Skip the linker stubs file. This preserves previous behavior
9861 of accepting unknown attributes in the first input file - but
9862 is that a bug? */
9863 if (ibfd->flags & BFD_LINKER_CREATED)
9864 return TRUE;
9865
9866 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9867 {
9868 /* This is the first object. Copy the attributes. */
9869 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9870
9871 out_attr = elf_known_obj_attributes_proc (obfd);
9872
9873 /* Use the Tag_null value to indicate the attributes have been
9874 initialized. */
9875 out_attr[0].i = 1;
9876
9877 /* We do not output objects with Tag_MPextension_use_legacy - we move
9878 the attribute's value to Tag_MPextension_use. */
9879 if (out_attr[Tag_MPextension_use_legacy].i != 0)
9880 {
9881 if (out_attr[Tag_MPextension_use].i != 0
9882 && out_attr[Tag_MPextension_use_legacy].i
9883 != out_attr[Tag_MPextension_use].i)
9884 {
9885 _bfd_error_handler
9886 (_("Error: %B has both the current and legacy "
9887 "Tag_MPextension_use attributes"), ibfd);
9888 result = FALSE;
9889 }
9890
9891 out_attr[Tag_MPextension_use] =
9892 out_attr[Tag_MPextension_use_legacy];
9893 out_attr[Tag_MPextension_use_legacy].type = 0;
9894 out_attr[Tag_MPextension_use_legacy].i = 0;
9895 }
9896
9897 return result;
9898 }
9899
9900 in_attr = elf_known_obj_attributes_proc (ibfd);
9901 out_attr = elf_known_obj_attributes_proc (obfd);
9902 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9903 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9904 {
9905 /* Ignore mismatches if the object doesn't use floating point. */
9906 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9907 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9908 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9909 {
9910 _bfd_error_handler
9911 (_("error: %B uses VFP register arguments, %B does not"),
9912 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9913 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
9914 result = FALSE;
9915 }
9916 }
9917
9918 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9919 {
9920 /* Merge this attribute with existing attributes. */
9921 switch (i)
9922 {
9923 case Tag_CPU_raw_name:
9924 case Tag_CPU_name:
9925 /* These are merged after Tag_CPU_arch. */
9926 break;
9927
9928 case Tag_ABI_optimization_goals:
9929 case Tag_ABI_FP_optimization_goals:
9930 /* Use the first value seen. */
9931 break;
9932
9933 case Tag_CPU_arch:
9934 {
9935 int secondary_compat = -1, secondary_compat_out = -1;
9936 unsigned int saved_out_attr = out_attr[i].i;
9937 static const char *name_table[] = {
9938 /* These aren't real CPU names, but we can't guess
9939 that from the architecture version alone. */
9940 "Pre v4",
9941 "ARM v4",
9942 "ARM v4T",
9943 "ARM v5T",
9944 "ARM v5TE",
9945 "ARM v5TEJ",
9946 "ARM v6",
9947 "ARM v6KZ",
9948 "ARM v6T2",
9949 "ARM v6K",
9950 "ARM v7",
9951 "ARM v6-M",
9952 "ARM v6S-M"
9953 };
9954
9955 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9956 secondary_compat = get_secondary_compatible_arch (ibfd);
9957 secondary_compat_out = get_secondary_compatible_arch (obfd);
9958 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9959 &secondary_compat_out,
9960 in_attr[i].i,
9961 secondary_compat);
9962 set_secondary_compatible_arch (obfd, secondary_compat_out);
9963
9964 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9965 if (out_attr[i].i == saved_out_attr)
9966 ; /* Leave the names alone. */
9967 else if (out_attr[i].i == in_attr[i].i)
9968 {
9969 /* The output architecture has been changed to match the
9970 input architecture. Use the input names. */
9971 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9972 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9973 : NULL;
9974 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9975 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9976 : NULL;
9977 }
9978 else
9979 {
9980 out_attr[Tag_CPU_name].s = NULL;
9981 out_attr[Tag_CPU_raw_name].s = NULL;
9982 }
9983
9984 /* If we still don't have a value for Tag_CPU_name,
9985 make one up now. Tag_CPU_raw_name remains blank. */
9986 if (out_attr[Tag_CPU_name].s == NULL
9987 && out_attr[i].i < ARRAY_SIZE (name_table))
9988 out_attr[Tag_CPU_name].s =
9989 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9990 }
9991 break;
9992
9993 case Tag_ARM_ISA_use:
9994 case Tag_THUMB_ISA_use:
9995 case Tag_WMMX_arch:
9996 case Tag_Advanced_SIMD_arch:
9997 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9998 case Tag_ABI_FP_rounding:
9999 case Tag_ABI_FP_exceptions:
10000 case Tag_ABI_FP_user_exceptions:
10001 case Tag_ABI_FP_number_model:
10002 case Tag_VFP_HP_extension:
10003 case Tag_CPU_unaligned_access:
10004 case Tag_T2EE_use:
10005 case Tag_Virtualization_use:
10006 case Tag_MPextension_use:
10007 /* Use the largest value specified. */
10008 if (in_attr[i].i > out_attr[i].i)
10009 out_attr[i].i = in_attr[i].i;
10010 break;
10011
10012 case Tag_ABI_align8_preserved:
10013 case Tag_ABI_PCS_RO_data:
10014 /* Use the smallest value specified. */
10015 if (in_attr[i].i < out_attr[i].i)
10016 out_attr[i].i = in_attr[i].i;
10017 break;
10018
10019 case Tag_ABI_align8_needed:
10020 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
10021 && (in_attr[Tag_ABI_align8_preserved].i == 0
10022 || out_attr[Tag_ABI_align8_preserved].i == 0))
10023 {
10024 /* This error message should be enabled once all non-conformant
10025 binaries in the toolchain have had the attributes set
10026 properly.
10027 _bfd_error_handler
10028 (_("error: %B: 8-byte data alignment conflicts with %B"),
10029 obfd, ibfd);
10030 result = FALSE; */
10031 }
10032 /* Fall through. */
10033 case Tag_ABI_FP_denormal:
10034 case Tag_ABI_PCS_GOT_use:
10035 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
10036 value if greater than 2 (for future-proofing). */
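/* For example, an input of 1 (strong requirement) merged with an output
   of 2 (weak requirement) gives 1, because order_021[1] > order_021[2];
   values of 3 or more simply take the numeric maximum. */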
10037 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
10038 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
10039 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
10040 out_attr[i].i = in_attr[i].i;
10041 break;
10042
10043
10044 case Tag_CPU_arch_profile:
10045 if (out_attr[i].i != in_attr[i].i)
10046 {
10047 /* 0 will merge with anything.
10048 'A' and 'S' merge to 'A'.
10049 'R' and 'S' merge to 'R'.
10050 'M' and 'A|R|S' is an error. */
10051 if (out_attr[i].i == 0
10052 || (out_attr[i].i == 'S'
10053 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
10054 out_attr[i].i = in_attr[i].i;
10055 else if (in_attr[i].i == 0
10056 || (in_attr[i].i == 'S'
10057 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
10058 ; /* Do nothing. */
10059 else
10060 {
10061 _bfd_error_handler
10062 (_("error: %B: Conflicting architecture profiles %c/%c"),
10063 ibfd,
10064 in_attr[i].i ? in_attr[i].i : '0',
10065 out_attr[i].i ? out_attr[i].i : '0');
10066 result = FALSE;
10067 }
10068 }
10069 break;
10070 case Tag_VFP_arch:
10071 {
10072 static const struct
10073 {
10074 int ver;
10075 int regs;
10076 } vfp_versions[7] =
10077 {
10078 {0, 0},
10079 {1, 16},
10080 {2, 16},
10081 {3, 32},
10082 {3, 16},
10083 {4, 32},
10084 {4, 16}
10085 };
10086 int ver;
10087 int regs;
10088 int newval;
10089
10090 /* Values greater than 6 aren't defined, so just pick the
10091 biggest. */
10092 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10093 {
10094 out_attr[i] = in_attr[i];
10095 break;
10096 }
10097 /* The output uses the superset of input features
10098 (ISA version) and registers. */
10099 ver = vfp_versions[in_attr[i].i].ver;
10100 if (ver < vfp_versions[out_attr[i].i].ver)
10101 ver = vfp_versions[out_attr[i].i].ver;
10102 regs = vfp_versions[in_attr[i].i].regs;
10103 if (regs < vfp_versions[out_attr[i].i].regs)
10104 regs = vfp_versions[out_attr[i].i].regs;
10105 /* This assumes all possible supersets are also valid
10106 options. */
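/* For instance, merging attribute value 4 ({3, 16} in the table above)
   with value 2 ({2, 16}) gives ver == 3, regs == 16, which the search
   below maps back to value 4. */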
10107 for (newval = 6; newval > 0; newval--)
10108 {
10109 if (regs == vfp_versions[newval].regs
10110 && ver == vfp_versions[newval].ver)
10111 break;
10112 }
10113 out_attr[i].i = newval;
10114 }
10115 break;
10116 case Tag_PCS_config:
10117 if (out_attr[i].i == 0)
10118 out_attr[i].i = in_attr[i].i;
10119 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10120 {
10121 /* It's sometimes ok to mix different configs, so this is only
10122 a warning. */
10123 _bfd_error_handler
10124 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10125 }
10126 break;
10127 case Tag_ABI_PCS_R9_use:
10128 if (in_attr[i].i != out_attr[i].i
10129 && out_attr[i].i != AEABI_R9_unused
10130 && in_attr[i].i != AEABI_R9_unused)
10131 {
10132 _bfd_error_handler
10133 (_("error: %B: Conflicting use of R9"), ibfd);
10134 result = FALSE;
10135 }
10136 if (out_attr[i].i == AEABI_R9_unused)
10137 out_attr[i].i = in_attr[i].i;
10138 break;
10139 case Tag_ABI_PCS_RW_data:
10140 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10141 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10142 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10143 {
10144 _bfd_error_handler
10145 (_("error: %B: SB relative addressing conflicts with use of R9"),
10146 ibfd);
10147 result = FALSE;
10148 }
10149 /* Use the smallest value specified. */
10150 if (in_attr[i].i < out_attr[i].i)
10151 out_attr[i].i = in_attr[i].i;
10152 break;
10153 case Tag_ABI_PCS_wchar_t:
10154 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10155 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10156 {
10157 _bfd_error_handler
10158 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10159 ibfd, in_attr[i].i, out_attr[i].i);
10160 }
10161 else if (in_attr[i].i && !out_attr[i].i)
10162 out_attr[i].i = in_attr[i].i;
10163 break;
10164 case Tag_ABI_enum_size:
10165 if (in_attr[i].i != AEABI_enum_unused)
10166 {
10167 if (out_attr[i].i == AEABI_enum_unused
10168 || out_attr[i].i == AEABI_enum_forced_wide)
10169 {
10170 /* The existing object is compatible with anything.
10171 Use whatever requirements the new object has. */
10172 out_attr[i].i = in_attr[i].i;
10173 }
10174 else if (in_attr[i].i != AEABI_enum_forced_wide
10175 && out_attr[i].i != in_attr[i].i
10176 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10177 {
10178 static const char *aeabi_enum_names[] =
10179 { "", "variable-size", "32-bit", "" };
10180 const char *in_name =
10181 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10182 ? aeabi_enum_names[in_attr[i].i]
10183 : "<unknown>";
10184 const char *out_name =
10185 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10186 ? aeabi_enum_names[out_attr[i].i]
10187 : "<unknown>";
10188 _bfd_error_handler
10189 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10190 ibfd, in_name, out_name);
10191 }
10192 }
10193 break;
10194 case Tag_ABI_VFP_args:
10195 /* Already done. */
10196 break;
10197 case Tag_ABI_WMMX_args:
10198 if (in_attr[i].i != out_attr[i].i)
10199 {
10200 _bfd_error_handler
10201 (_("error: %B uses iWMMXt register arguments, %B does not"),
10202 ibfd, obfd);
10203 result = FALSE;
10204 }
10205 break;
10206 case Tag_compatibility:
10207 /* Merged in target-independent code. */
10208 break;
10209 case Tag_ABI_HardFP_use:
10210 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10211 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10212 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10213 out_attr[i].i = 3;
10214 else if (in_attr[i].i > out_attr[i].i)
10215 out_attr[i].i = in_attr[i].i;
10216 break;
10217 case Tag_ABI_FP_16bit_format:
10218 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10219 {
10220 if (in_attr[i].i != out_attr[i].i)
10221 {
10222 _bfd_error_handler
10223 (_("error: fp16 format mismatch between %B and %B"),
10224 ibfd, obfd);
10225 result = FALSE;
10226 }
10227 }
10228 if (in_attr[i].i != 0)
10229 out_attr[i].i = in_attr[i].i;
10230 break;
10231
10232 case Tag_DIV_use:
10233 /* This tag is set to zero if we can use UDIV and SDIV in Thumb
10234 mode on a v7-M or v7-R CPU; to one if we can not use UDIV or
10235 SDIV at all; and to two if we can use UDIV or SDIV on a v7-A
10236 CPU. We will merge as follows: If the input attribute's value
10237 is one then the output attribute's value remains unchanged. If
10238 the input attribute's value is zero or two then if the output
10239 attribute's value is one the output value is set to the input
10240 value, otherwise the output value must be the same as the
10241 inputs. */
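/* E.g. 0 (input) merged with 1 (output) gives 0; 2 merged with 1 gives 2;
   0 merged with 2 is reported as a mismatch. */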
10242 if (in_attr[i].i != 1 && out_attr[i].i != 1)
10243 {
10244 if (in_attr[i].i != out_attr[i].i)
10245 {
10246 _bfd_error_handler
10247 (_("DIV usage mismatch between %B and %B"),
10248 ibfd, obfd);
10249 result = FALSE;
10250 }
10251 }
10252
10253 if (in_attr[i].i != 1)
10254 out_attr[i].i = in_attr[i].i;
10255
10256 break;
10257
10258 case Tag_MPextension_use_legacy:
10259 /* We don't output objects with Tag_MPextension_use_legacy - we
10260 move the value to Tag_MPextension_use. */
10261 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
10262 {
10263 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
10264 {
10265 _bfd_error_handler
10266 (_("%B has has both the current and legacy "
10267 "Tag_MPextension_use attributes"),
10268 ibfd);
10269 result = FALSE;
10270 }
10271 }
10272
10273 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
10274 out_attr[Tag_MPextension_use] = in_attr[i];
10275
10276 break;
10277
10278 case Tag_nodefaults:
10279 /* This tag is set if it exists, but the value is unused (and is
10280 typically zero). We don't actually need to do anything here -
10281 the merge happens automatically when the type flags are merged
10282 below. */
10283 break;
10284 case Tag_also_compatible_with:
10285 /* Already done in Tag_CPU_arch. */
10286 break;
10287 case Tag_conformance:
10288 /* Keep the attribute if it matches. Throw it away otherwise.
10289 No attribute means no claim to conform. */
10290 if (!in_attr[i].s || !out_attr[i].s
10291 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10292 out_attr[i].s = NULL;
10293 break;
10294
10295 default:
10296 {
10297 bfd *err_bfd = NULL;
10298
10299 /* The "known_obj_attributes" table does contain some undefined
10300 attributes. Ensure that they are unused. */
10301 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10302 err_bfd = obfd;
10303 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10304 err_bfd = ibfd;
10305
10306 if (err_bfd != NULL)
10307 {
10308 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10309 if ((i & 127) < 64)
10310 {
10311 _bfd_error_handler
10312 (_("%B: Unknown mandatory EABI object attribute %d"),
10313 err_bfd, i);
10314 bfd_set_error (bfd_error_bad_value);
10315 result = FALSE;
10316 }
10317 else
10318 {
10319 _bfd_error_handler
10320 (_("Warning: %B: Unknown EABI object attribute %d"),
10321 err_bfd, i);
10322 }
10323 }
10324
10325 /* Only pass on attributes that match in both inputs. */
10326 if (in_attr[i].i != out_attr[i].i
10327 || in_attr[i].s != out_attr[i].s
10328 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10329 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10330 {
10331 out_attr[i].i = 0;
10332 out_attr[i].s = NULL;
10333 }
10334 }
10335 }
10336
10337 /* If out_attr was copied from in_attr then it won't have a type yet. */
10338 if (in_attr[i].type && !out_attr[i].type)
10339 out_attr[i].type = in_attr[i].type;
10340 }
10341
10342 /* Merge Tag_compatibility attributes and any common GNU ones. */
10343 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
10344 return FALSE;
10345
10346 /* Check for any attributes not known on ARM. */
10347 in_list = elf_other_obj_attributes_proc (ibfd);
10348 out_listp = &elf_other_obj_attributes_proc (obfd);
10349 out_list = *out_listp;
10350
10351 for (; in_list || out_list; )
10352 {
10353 bfd *err_bfd = NULL;
10354 int err_tag = 0;
10355
10356 /* The tags for each list are in numerical order. */
10357 /* If the tags are equal, then merge. */
10358 if (out_list && (!in_list || in_list->tag > out_list->tag))
10359 {
10360 /* This attribute only exists in obfd. We can't merge, and we don't
10361 know what the tag means, so delete it. */
10362 err_bfd = obfd;
10363 err_tag = out_list->tag;
10364 *out_listp = out_list->next;
10365 out_list = *out_listp;
10366 }
10367 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10368 {
10369 /* This attribute only exists in ibfd. We can't merge, and we don't
10370 know what the tag means, so ignore it. */
10371 err_bfd = ibfd;
10372 err_tag = in_list->tag;
10373 in_list = in_list->next;
10374 }
10375 else /* The tags are equal. */
10376 {
10377 /* At present, all attributes in the list are unknown, and
10378 therefore can't be merged meaningfully. */
10379 err_bfd = obfd;
10380 err_tag = out_list->tag;
10381
10382 /* Only pass on attributes that match in both inputs. */
10383 if (in_list->attr.i != out_list->attr.i
10384 || in_list->attr.s != out_list->attr.s
10385 || (in_list->attr.s && out_list->attr.s
10386 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10387 {
10388 /* No match. Delete the attribute. */
10389 *out_listp = out_list->next;
10390 out_list = *out_listp;
10391 }
10392 else
10393 {
10394 /* Matched. Keep the attribute and move to the next. */
10395 out_list = out_list->next;
10396 in_list = in_list->next;
10397 }
10398 }
10399
10400 if (err_bfd)
10401 {
10402 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10403 if ((err_tag & 127) < 64)
10404 {
10405 _bfd_error_handler
10406 (_("%B: Unknown mandatory EABI object attribute %d"),
10407 err_bfd, err_tag);
10408 bfd_set_error (bfd_error_bad_value);
10409 result = FALSE;
10410 }
10411 else
10412 {
10413 _bfd_error_handler
10414 (_("Warning: %B: Unknown EABI object attribute %d"),
10415 err_bfd, err_tag);
10416 }
10417 }
10418 }
10419 return result;
10420 }
10421
10422
10423 /* Return TRUE if the two EABI versions are compatible. */
10424
10425 static bfd_boolean
10426 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10427 {
10428 /* v4 and v5 are the same spec before and after it was released,
10429 so allow mixing them. */
10430 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10431 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10432 return TRUE;
10433
10434 return (iver == over);
10435 }
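
/* So, for example, mixing EF_ARM_EABI_VER4 and EF_ARM_EABI_VER5 objects is
   accepted, while mixing EF_ARM_EABI_VER2 with EF_ARM_EABI_VER5 is not. */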
10436
10437 /* Merge backend specific data from an object file to the output
10438 object file when linking. */
10439
10440 static bfd_boolean
10441 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10442
10443 /* Display the flags field. */
10444
10445 static bfd_boolean
10446 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10447 {
10448 FILE * file = (FILE *) ptr;
10449 unsigned long flags;
10450
10451 BFD_ASSERT (abfd != NULL && ptr != NULL);
10452
10453 /* Print normal ELF private data. */
10454 _bfd_elf_print_private_bfd_data (abfd, ptr);
10455
10456 flags = elf_elfheader (abfd)->e_flags;
10457 /* Ignore init flag - it may not be set, despite the flags field
10458 containing valid data. */
10459
10460 /* xgettext:c-format */
10461 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10462
10463 switch (EF_ARM_EABI_VERSION (flags))
10464 {
10465 case EF_ARM_EABI_UNKNOWN:
10466 /* The following flag bits are GNU extensions and not part of the
10467 official ARM ELF extended ABI. Hence they are only decoded if
10468 the EABI version is not set. */
10469 if (flags & EF_ARM_INTERWORK)
10470 fprintf (file, _(" [interworking enabled]"));
10471
10472 if (flags & EF_ARM_APCS_26)
10473 fprintf (file, " [APCS-26]");
10474 else
10475 fprintf (file, " [APCS-32]");
10476
10477 if (flags & EF_ARM_VFP_FLOAT)
10478 fprintf (file, _(" [VFP float format]"));
10479 else if (flags & EF_ARM_MAVERICK_FLOAT)
10480 fprintf (file, _(" [Maverick float format]"));
10481 else
10482 fprintf (file, _(" [FPA float format]"));
10483
10484 if (flags & EF_ARM_APCS_FLOAT)
10485 fprintf (file, _(" [floats passed in float registers]"));
10486
10487 if (flags & EF_ARM_PIC)
10488 fprintf (file, _(" [position independent]"));
10489
10490 if (flags & EF_ARM_NEW_ABI)
10491 fprintf (file, _(" [new ABI]"));
10492
10493 if (flags & EF_ARM_OLD_ABI)
10494 fprintf (file, _(" [old ABI]"));
10495
10496 if (flags & EF_ARM_SOFT_FLOAT)
10497 fprintf (file, _(" [software FP]"));
10498
10499 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10500 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10501 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10502 | EF_ARM_MAVERICK_FLOAT);
10503 break;
10504
10505 case EF_ARM_EABI_VER1:
10506 fprintf (file, _(" [Version1 EABI]"));
10507
10508 if (flags & EF_ARM_SYMSARESORTED)
10509 fprintf (file, _(" [sorted symbol table]"));
10510 else
10511 fprintf (file, _(" [unsorted symbol table]"));
10512
10513 flags &= ~ EF_ARM_SYMSARESORTED;
10514 break;
10515
10516 case EF_ARM_EABI_VER2:
10517 fprintf (file, _(" [Version2 EABI]"));
10518
10519 if (flags & EF_ARM_SYMSARESORTED)
10520 fprintf (file, _(" [sorted symbol table]"));
10521 else
10522 fprintf (file, _(" [unsorted symbol table]"));
10523
10524 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10525 fprintf (file, _(" [dynamic symbols use segment index]"));
10526
10527 if (flags & EF_ARM_MAPSYMSFIRST)
10528 fprintf (file, _(" [mapping symbols precede others]"));
10529
10530 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10531 | EF_ARM_MAPSYMSFIRST);
10532 break;
10533
10534 case EF_ARM_EABI_VER3:
10535 fprintf (file, _(" [Version3 EABI]"));
10536 break;
10537
10538 case EF_ARM_EABI_VER4:
10539 fprintf (file, _(" [Version4 EABI]"));
10540 goto eabi;
10541
10542 case EF_ARM_EABI_VER5:
10543 fprintf (file, _(" [Version5 EABI]"));
10544 eabi:
10545 if (flags & EF_ARM_BE8)
10546 fprintf (file, _(" [BE8]"));
10547
10548 if (flags & EF_ARM_LE8)
10549 fprintf (file, _(" [LE8]"));
10550
10551 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10552 break;
10553
10554 default:
10555 fprintf (file, _(" <EABI version unrecognised>"));
10556 break;
10557 }
10558
10559 flags &= ~ EF_ARM_EABIMASK;
10560
10561 if (flags & EF_ARM_RELEXEC)
10562 fprintf (file, _(" [relocatable executable]"));
10563
10564 if (flags & EF_ARM_HASENTRY)
10565 fprintf (file, _(" [has entry point]"));
10566
10567 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10568
10569 if (flags)
10570 fprintf (file, _("<Unrecognised flag bits set>"));
10571
10572 fputc ('\n', file);
10573
10574 return TRUE;
10575 }
10576
10577 static int
10578 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10579 {
10580 switch (ELF_ST_TYPE (elf_sym->st_info))
10581 {
10582 case STT_ARM_TFUNC:
10583 return ELF_ST_TYPE (elf_sym->st_info);
10584
10585 case STT_ARM_16BIT:
10586 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10587 This allows us to distinguish between data used by Thumb instructions
10588 and non-data (which is probably code) inside Thumb regions of an
10589 executable. */
10590 if (type != STT_OBJECT && type != STT_TLS)
10591 return ELF_ST_TYPE (elf_sym->st_info);
10592 break;
10593
10594 default:
10595 break;
10596 }
10597
10598 return type;
10599 }
10600
10601 static asection *
10602 elf32_arm_gc_mark_hook (asection *sec,
10603 struct bfd_link_info *info,
10604 Elf_Internal_Rela *rel,
10605 struct elf_link_hash_entry *h,
10606 Elf_Internal_Sym *sym)
10607 {
10608 if (h != NULL)
10609 switch (ELF32_R_TYPE (rel->r_info))
10610 {
10611 case R_ARM_GNU_VTINHERIT:
10612 case R_ARM_GNU_VTENTRY:
10613 return NULL;
10614 }
10615
10616 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10617 }
10618
10619 /* Update the got entry reference counts for the section being removed. */
10620
10621 static bfd_boolean
10622 elf32_arm_gc_sweep_hook (bfd * abfd,
10623 struct bfd_link_info * info,
10624 asection * sec,
10625 const Elf_Internal_Rela * relocs)
10626 {
10627 Elf_Internal_Shdr *symtab_hdr;
10628 struct elf_link_hash_entry **sym_hashes;
10629 bfd_signed_vma *local_got_refcounts;
10630 const Elf_Internal_Rela *rel, *relend;
10631 struct elf32_arm_link_hash_table * globals;
10632
10633 if (info->relocatable)
10634 return TRUE;
10635
10636 globals = elf32_arm_hash_table (info);
10637 if (globals == NULL)
10638 return FALSE;
10639
10640 elf_section_data (sec)->local_dynrel = NULL;
10641
10642 symtab_hdr = & elf_symtab_hdr (abfd);
10643 sym_hashes = elf_sym_hashes (abfd);
10644 local_got_refcounts = elf_local_got_refcounts (abfd);
10645
10646 check_use_blx (globals);
10647
10648 relend = relocs + sec->reloc_count;
10649 for (rel = relocs; rel < relend; rel++)
10650 {
10651 unsigned long r_symndx;
10652 struct elf_link_hash_entry *h = NULL;
10653 int r_type;
10654
10655 r_symndx = ELF32_R_SYM (rel->r_info);
10656 if (r_symndx >= symtab_hdr->sh_info)
10657 {
10658 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10659 while (h->root.type == bfd_link_hash_indirect
10660 || h->root.type == bfd_link_hash_warning)
10661 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10662 }
10663
10664 r_type = ELF32_R_TYPE (rel->r_info);
10665 r_type = arm_real_reloc_type (globals, r_type);
10666 switch (r_type)
10667 {
10668 case R_ARM_GOT32:
10669 case R_ARM_GOT_PREL:
10670 case R_ARM_TLS_GD32:
10671 case R_ARM_TLS_IE32:
10672 if (h != NULL)
10673 {
10674 if (h->got.refcount > 0)
10675 h->got.refcount -= 1;
10676 }
10677 else if (local_got_refcounts != NULL)
10678 {
10679 if (local_got_refcounts[r_symndx] > 0)
10680 local_got_refcounts[r_symndx] -= 1;
10681 }
10682 break;
10683
10684 case R_ARM_TLS_LDM32:
10685 globals->tls_ldm_got.refcount -= 1;
10686 break;
10687
10688 case R_ARM_ABS32:
10689 case R_ARM_ABS32_NOI:
10690 case R_ARM_REL32:
10691 case R_ARM_REL32_NOI:
10692 case R_ARM_PC24:
10693 case R_ARM_PLT32:
10694 case R_ARM_CALL:
10695 case R_ARM_JUMP24:
10696 case R_ARM_PREL31:
10697 case R_ARM_THM_CALL:
10698 case R_ARM_THM_JUMP24:
10699 case R_ARM_THM_JUMP19:
10700 case R_ARM_MOVW_ABS_NC:
10701 case R_ARM_MOVT_ABS:
10702 case R_ARM_MOVW_PREL_NC:
10703 case R_ARM_MOVT_PREL:
10704 case R_ARM_THM_MOVW_ABS_NC:
10705 case R_ARM_THM_MOVT_ABS:
10706 case R_ARM_THM_MOVW_PREL_NC:
10707 case R_ARM_THM_MOVT_PREL:
10708 /* Should the interworking branches be here also? */
10709
10710 if (h != NULL)
10711 {
10712 struct elf32_arm_link_hash_entry *eh;
10713 struct elf32_arm_relocs_copied **pp;
10714 struct elf32_arm_relocs_copied *p;
10715
10716 eh = (struct elf32_arm_link_hash_entry *) h;
10717
10718 if (h->plt.refcount > 0)
10719 {
10720 h->plt.refcount -= 1;
10721 if (r_type == R_ARM_THM_CALL)
10722 eh->plt_maybe_thumb_refcount--;
10723
10724 if (r_type == R_ARM_THM_JUMP24
10725 || r_type == R_ARM_THM_JUMP19)
10726 eh->plt_thumb_refcount--;
10727 }
10728
10729 if (r_type == R_ARM_ABS32
10730 || r_type == R_ARM_REL32
10731 || r_type == R_ARM_ABS32_NOI
10732 || r_type == R_ARM_REL32_NOI)
10733 {
10734 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10735 pp = &p->next)
10736 if (p->section == sec)
10737 {
10738 p->count -= 1;
10739 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10740 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10741 p->pc_count -= 1;
10742 if (p->count == 0)
10743 *pp = p->next;
10744 break;
10745 }
10746 }
10747 }
10748 break;
10749
10750 default:
10751 break;
10752 }
10753 }
10754
10755 return TRUE;
10756 }
10757
10758 /* Look through the relocs for a section during the first phase. */
10759
10760 static bfd_boolean
10761 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10762 asection *sec, const Elf_Internal_Rela *relocs)
10763 {
10764 Elf_Internal_Shdr *symtab_hdr;
10765 struct elf_link_hash_entry **sym_hashes;
10766 const Elf_Internal_Rela *rel;
10767 const Elf_Internal_Rela *rel_end;
10768 bfd *dynobj;
10769 asection *sreloc;
10770 bfd_vma *local_got_offsets;
10771 struct elf32_arm_link_hash_table *htab;
10772 bfd_boolean needs_plt;
10773 unsigned long nsyms;
10774
10775 if (info->relocatable)
10776 return TRUE;
10777
10778 BFD_ASSERT (is_arm_elf (abfd));
10779
10780 htab = elf32_arm_hash_table (info);
10781 if (htab == NULL)
10782 return FALSE;
10783
10784 sreloc = NULL;
10785
10786 /* Create dynamic sections for relocatable executables so that we can
10787 copy relocations. */
10788 if (htab->root.is_relocatable_executable
10789 && ! htab->root.dynamic_sections_created)
10790 {
10791 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10792 return FALSE;
10793 }
10794
10795 dynobj = elf_hash_table (info)->dynobj;
10796 local_got_offsets = elf_local_got_offsets (abfd);
10797
10798 symtab_hdr = & elf_symtab_hdr (abfd);
10799 sym_hashes = elf_sym_hashes (abfd);
10800 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10801
10802 rel_end = relocs + sec->reloc_count;
10803 for (rel = relocs; rel < rel_end; rel++)
10804 {
10805 struct elf_link_hash_entry *h;
10806 struct elf32_arm_link_hash_entry *eh;
10807 unsigned long r_symndx;
10808 int r_type;
10809
10810 r_symndx = ELF32_R_SYM (rel->r_info);
10811 r_type = ELF32_R_TYPE (rel->r_info);
10812 r_type = arm_real_reloc_type (htab, r_type);
10813
10814 if (r_symndx >= nsyms
10815 /* PR 9934: It is possible to have relocations that do not
10816 refer to symbols, thus it is also possible to have an
10817 object file containing relocations but no symbol table. */
10818 && (r_symndx > 0 || nsyms > 0))
10819 {
10820 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10821 r_symndx);
10822 return FALSE;
10823 }
10824
10825 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10826 h = NULL;
10827 else
10828 {
10829 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10830 while (h->root.type == bfd_link_hash_indirect
10831 || h->root.type == bfd_link_hash_warning)
10832 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10833 }
10834
10835 eh = (struct elf32_arm_link_hash_entry *) h;
10836
10837 switch (r_type)
10838 {
10839 case R_ARM_GOT32:
10840 case R_ARM_GOT_PREL:
10841 case R_ARM_TLS_GD32:
10842 case R_ARM_TLS_IE32:
10843 /* This symbol requires a global offset table entry. */
10844 {
10845 int tls_type, old_tls_type;
10846
10847 switch (r_type)
10848 {
10849 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10850 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10851 default: tls_type = GOT_NORMAL; break;
10852 }
10853
10854 if (h != NULL)
10855 {
10856 h->got.refcount++;
10857 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10858 }
10859 else
10860 {
10861 bfd_signed_vma *local_got_refcounts;
10862
10863 /* This is a global offset table entry for a local symbol. */
10864 local_got_refcounts = elf_local_got_refcounts (abfd);
10865 if (local_got_refcounts == NULL)
10866 {
10867 bfd_size_type size;
10868
10869 size = symtab_hdr->sh_info;
10870 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10871 local_got_refcounts = (bfd_signed_vma *)
10872 bfd_zalloc (abfd, size);
10873 if (local_got_refcounts == NULL)
10874 return FALSE;
10875 elf_local_got_refcounts (abfd) = local_got_refcounts;
10876 elf32_arm_local_got_tls_type (abfd)
10877 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10878 }
10879 local_got_refcounts[r_symndx] += 1;
10880 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10881 }
10882
10883 /* We will already have issued an error message if there is a
10884 TLS / non-TLS mismatch, based on the symbol type. We don't
10885 support any linker relaxations. So just combine any TLS
10886 types needed. */
10887 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10888 && tls_type != GOT_NORMAL)
10889 tls_type |= old_tls_type;
10890
10891 if (old_tls_type != tls_type)
10892 {
10893 if (h != NULL)
10894 elf32_arm_hash_entry (h)->tls_type = tls_type;
10895 else
10896 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10897 }
10898 }
10899 /* Fall through. */
10900
10901 case R_ARM_TLS_LDM32:
10902 if (r_type == R_ARM_TLS_LDM32)
10903 htab->tls_ldm_got.refcount++;
10904 /* Fall through. */
10905
10906 case R_ARM_GOTOFF32:
10907 case R_ARM_GOTPC:
10908 if (htab->sgot == NULL)
10909 {
10910 if (htab->root.dynobj == NULL)
10911 htab->root.dynobj = abfd;
10912 if (!create_got_section (htab->root.dynobj, info))
10913 return FALSE;
10914 }
10915 break;
10916
10917 case R_ARM_ABS12:
10918 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10919 ldr __GOTT_INDEX__ offsets. */
10920 if (!htab->vxworks_p)
10921 break;
10922 /* Fall through. */
10923
10924 case R_ARM_PC24:
10925 case R_ARM_PLT32:
10926 case R_ARM_CALL:
10927 case R_ARM_JUMP24:
10928 case R_ARM_PREL31:
10929 case R_ARM_THM_CALL:
10930 case R_ARM_THM_JUMP24:
10931 case R_ARM_THM_JUMP19:
10932 needs_plt = 1;
10933 goto normal_reloc;
10934
10935 case R_ARM_MOVW_ABS_NC:
10936 case R_ARM_MOVT_ABS:
10937 case R_ARM_THM_MOVW_ABS_NC:
10938 case R_ARM_THM_MOVT_ABS:
10939 if (info->shared)
10940 {
10941 (*_bfd_error_handler)
10942 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10943 abfd, elf32_arm_howto_table_1[r_type].name,
10944 (h) ? h->root.root.string : "a local symbol");
10945 bfd_set_error (bfd_error_bad_value);
10946 return FALSE;
10947 }
10948
10949 /* Fall through. */
10950 case R_ARM_ABS32:
10951 case R_ARM_ABS32_NOI:
10952 case R_ARM_REL32:
10953 case R_ARM_REL32_NOI:
10954 case R_ARM_MOVW_PREL_NC:
10955 case R_ARM_MOVT_PREL:
10956 case R_ARM_THM_MOVW_PREL_NC:
10957 case R_ARM_THM_MOVT_PREL:
10958 needs_plt = 0;
10959 normal_reloc:
10960
10961 /* Should the interworking branches be listed here? */
10962 if (h != NULL)
10963 {
10964 /* If this reloc is in a read-only section, we might
10965 need a copy reloc. We can't check reliably at this
10966 stage whether the section is read-only, as input
10967 sections have not yet been mapped to output sections.
10968 Tentatively set the flag for now, and correct in
10969 adjust_dynamic_symbol. */
10970 if (!info->shared)
10971 h->non_got_ref = 1;
10972
10973 /* We may need a .plt entry if the function this reloc
10974 refers to is in a different object. We can't tell for
10975 sure yet, because something later might force the
10976 symbol local. */
10977 if (needs_plt)
10978 h->needs_plt = 1;
10979
10980 /* If we create a PLT entry, this relocation will reference
10981 it, even if it's an ABS32 relocation. */
10982 h->plt.refcount += 1;
10983
10984 /* It's too early to use htab->use_blx here, so we have to
10985 record possible blx references separately from
10986 relocs that definitely need a thumb stub. */
10987
10988 if (r_type == R_ARM_THM_CALL)
10989 eh->plt_maybe_thumb_refcount += 1;
10990
10991 if (r_type == R_ARM_THM_JUMP24
10992 || r_type == R_ARM_THM_JUMP19)
10993 eh->plt_thumb_refcount += 1;
10994 }
10995
10996 /* If we are creating a shared library or relocatable executable,
10997 and this is a reloc against a global symbol, or a non PC
10998 relative reloc against a local symbol, then we need to copy
10999 the reloc into the shared library. However, if we are linking
11000 with -Bsymbolic, we do not need to copy a reloc against a
11001 global symbol which is defined in an object we are
11002 including in the link (i.e., DEF_REGULAR is set). At
11003 this point we have not seen all the input files, so it is
11004 possible that DEF_REGULAR is not set now but will be set
11005 later (it is never cleared). We account for that
11006 possibility below by storing information in the
11007 relocs_copied field of the hash table entry. */
11008 if ((info->shared || htab->root.is_relocatable_executable)
11009 && (sec->flags & SEC_ALLOC) != 0
11010 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
11011 || (h != NULL && ! h->needs_plt
11012 && (! info->symbolic || ! h->def_regular))))
11013 {
11014 struct elf32_arm_relocs_copied *p, **head;
11015
11016 /* When creating a shared object, we must copy these
11017 reloc types into the output file. We create a reloc
11018 section in dynobj and make room for this reloc. */
11019 if (sreloc == NULL)
11020 {
11021 sreloc = _bfd_elf_make_dynamic_reloc_section
11022 (sec, dynobj, 2, abfd, ! htab->use_rel);
11023
11024 if (sreloc == NULL)
11025 return FALSE;
11026
11027 /* BPABI objects never have dynamic relocations mapped. */
11028 if (htab->symbian_p)
11029 {
11030 flagword flags;
11031
11032 flags = bfd_get_section_flags (dynobj, sreloc);
11033 flags &= ~(SEC_LOAD | SEC_ALLOC);
11034 bfd_set_section_flags (dynobj, sreloc, flags);
11035 }
11036 }
11037
11038 /* If this is a global symbol, we count the number of
11039 relocations we need for this symbol. */
11040 if (h != NULL)
11041 {
11042 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
11043 }
11044 else
11045 {
11046 /* Track dynamic relocs needed for local syms too.
11047 We really need local syms available to do this
11048 easily. Oh well. */
11049 asection *s;
11050 void *vpp;
11051 Elf_Internal_Sym *isym;
11052
11053 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
11054 abfd, r_symndx);
11055 if (isym == NULL)
11056 return FALSE;
11057
11058 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
11059 if (s == NULL)
11060 s = sec;
11061
11062 vpp = &elf_section_data (s)->local_dynrel;
11063 head = (struct elf32_arm_relocs_copied **) vpp;
11064 }
11065
11066 p = *head;
11067 if (p == NULL || p->section != sec)
11068 {
11069 bfd_size_type amt = sizeof *p;
11070
11071 p = (struct elf32_arm_relocs_copied *)
11072 bfd_alloc (htab->root.dynobj, amt);
11073 if (p == NULL)
11074 return FALSE;
11075 p->next = *head;
11076 *head = p;
11077 p->section = sec;
11078 p->count = 0;
11079 p->pc_count = 0;
11080 }
11081
11082 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
11083 p->pc_count += 1;
11084 p->count += 1;
11085 }
11086 break;
11087
11088 /* This relocation describes the C++ object vtable hierarchy.
11089 Reconstruct it for later use during GC. */
11090 case R_ARM_GNU_VTINHERIT:
11091 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
11092 return FALSE;
11093 break;
11094
11095 /* This relocation describes which C++ vtable entries are actually
11096 used. Record for later use during GC. */
11097 case R_ARM_GNU_VTENTRY:
11098 BFD_ASSERT (h != NULL);
11099 if (h != NULL
11100 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
11101 return FALSE;
11102 break;
11103 }
11104 }
11105
11106 return TRUE;
11107 }
11108
11109 /* Unwinding tables are not referenced directly. This pass marks them as
11110 required if the corresponding code section is marked. */
11111
11112 static bfd_boolean
11113 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
11114 elf_gc_mark_hook_fn gc_mark_hook)
11115 {
11116 bfd *sub;
11117 Elf_Internal_Shdr **elf_shdrp;
11118 bfd_boolean again;
11119
11120 /* Marking EH data may cause additional code sections to be marked,
11121 requiring multiple passes. */
11122 again = TRUE;
11123 while (again)
11124 {
11125 again = FALSE;
11126 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11127 {
11128 asection *o;
11129
11130 if (! is_arm_elf (sub))
11131 continue;
11132
11133 elf_shdrp = elf_elfsections (sub);
11134 for (o = sub->sections; o != NULL; o = o->next)
11135 {
11136 Elf_Internal_Shdr *hdr;
11137
11138 hdr = &elf_section_data (o)->this_hdr;
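		  /* The sh_link of an SHT_ARM_EXIDX section names the text
		     section whose unwind entries it holds; mark the EXIDX
		     section only once that text section has been marked.  */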
11139 if (hdr->sh_type == SHT_ARM_EXIDX
11140 && hdr->sh_link
11141 && hdr->sh_link < elf_numsections (sub)
11142 && !o->gc_mark
11143 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11144 {
11145 again = TRUE;
11146 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11147 return FALSE;
11148 }
11149 }
11150 }
11151 }
11152
11153 return TRUE;
11154 }
11155
11156 /* Treat mapping symbols as special target symbols. */
11157
11158 static bfd_boolean
11159 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11160 {
11161 return bfd_is_arm_special_symbol_name (sym->name,
11162 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11163 }
11164
11165 /* This is a copy of elf_find_function() from elf.c except that
11166 ARM mapping symbols are ignored when looking for function names
11167    and STT_ARM_TFUNC is considered to be a function type. */
11168
11169 static bfd_boolean
11170 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11171 asection * section,
11172 asymbol ** symbols,
11173 bfd_vma offset,
11174 const char ** filename_ptr,
11175 const char ** functionname_ptr)
11176 {
11177 const char * filename = NULL;
11178 asymbol * func = NULL;
11179 bfd_vma low_func = 0;
11180 asymbol ** p;
11181
11182 for (p = symbols; *p != NULL; p++)
11183 {
11184 elf_symbol_type *q;
11185
11186 q = (elf_symbol_type *) *p;
11187
11188 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11189 {
11190 default:
11191 break;
11192 case STT_FILE:
11193 filename = bfd_asymbol_name (&q->symbol);
11194 break;
11195 case STT_FUNC:
11196 case STT_ARM_TFUNC:
11197 case STT_NOTYPE:
11198 /* Skip mapping symbols. */
11199 if ((q->symbol.flags & BSF_LOCAL)
11200 && bfd_is_arm_special_symbol_name (q->symbol.name,
11201 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11202 continue;
11203 /* Fall through. */
11204 if (bfd_get_section (&q->symbol) == section
11205 && q->symbol.value >= low_func
11206 && q->symbol.value <= offset)
11207 {
11208 func = (asymbol *) q;
11209 low_func = q->symbol.value;
11210 }
11211 break;
11212 }
11213 }
11214
11215 if (func == NULL)
11216 return FALSE;
11217
11218 if (filename_ptr)
11219 *filename_ptr = filename;
11220 if (functionname_ptr)
11221 *functionname_ptr = bfd_asymbol_name (func);
11222
11223 return TRUE;
11224 }
11225
11226
11227 /* Find the nearest line to a particular section and offset, for error
11228 reporting. This code is a duplicate of the code in elf.c, except
11229 that it uses arm_elf_find_function. */
11230
11231 static bfd_boolean
11232 elf32_arm_find_nearest_line (bfd * abfd,
11233 asection * section,
11234 asymbol ** symbols,
11235 bfd_vma offset,
11236 const char ** filename_ptr,
11237 const char ** functionname_ptr,
11238 unsigned int * line_ptr)
11239 {
11240 bfd_boolean found = FALSE;
11241
11242 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11243
11244 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11245 filename_ptr, functionname_ptr,
11246 line_ptr, 0,
11247 & elf_tdata (abfd)->dwarf2_find_line_info))
11248 {
11249 if (!*functionname_ptr)
11250 arm_elf_find_function (abfd, section, symbols, offset,
11251 *filename_ptr ? NULL : filename_ptr,
11252 functionname_ptr);
11253
11254 return TRUE;
11255 }
11256
11257 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11258 & found, filename_ptr,
11259 functionname_ptr, line_ptr,
11260 & elf_tdata (abfd)->line_info))
11261 return FALSE;
11262
11263 if (found && (*functionname_ptr || *line_ptr))
11264 return TRUE;
11265
11266 if (symbols == NULL)
11267 return FALSE;
11268
11269 if (! arm_elf_find_function (abfd, section, symbols, offset,
11270 filename_ptr, functionname_ptr))
11271 return FALSE;
11272
11273 *line_ptr = 0;
11274 return TRUE;
11275 }
11276
11277 static bfd_boolean
11278 elf32_arm_find_inliner_info (bfd * abfd,
11279 const char ** filename_ptr,
11280 const char ** functionname_ptr,
11281 unsigned int * line_ptr)
11282 {
11283 bfd_boolean found;
11284 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11285 functionname_ptr, line_ptr,
11286 & elf_tdata (abfd)->dwarf2_find_line_info);
11287 return found;
11288 }
11289
11290 /* Adjust a symbol defined by a dynamic object and referenced by a
11291 regular object. The current definition is in some section of the
11292 dynamic object, but we're not including those sections. We have to
11293 change the definition to something the rest of the link can
11294 understand. */
11295
11296 static bfd_boolean
11297 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11298 struct elf_link_hash_entry * h)
11299 {
11300 bfd * dynobj;
11301 asection * s;
11302 struct elf32_arm_link_hash_entry * eh;
11303 struct elf32_arm_link_hash_table *globals;
11304
11305 globals = elf32_arm_hash_table (info);
11306 if (globals == NULL)
11307 return FALSE;
11308
11309 dynobj = elf_hash_table (info)->dynobj;
11310
11311 /* Make sure we know what is going on here. */
11312 BFD_ASSERT (dynobj != NULL
11313 && (h->needs_plt
11314 || h->u.weakdef != NULL
11315 || (h->def_dynamic
11316 && h->ref_regular
11317 && !h->def_regular)));
11318
11319 eh = (struct elf32_arm_link_hash_entry *) h;
11320
11321 /* If this is a function, put it in the procedure linkage table. We
11322 will fill in the contents of the procedure linkage table later,
11323 when we know the address of the .got section. */
11324 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11325 || h->needs_plt)
11326 {
11327 if (h->plt.refcount <= 0
11328 || SYMBOL_CALLS_LOCAL (info, h)
11329 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11330 && h->root.type == bfd_link_hash_undefweak))
11331 {
11332 /* This case can occur if we saw a PLT32 reloc in an input
11333 file, but the symbol was never referred to by a dynamic
11334 object, or if all references were garbage collected. In
11335 such a case, we don't actually need to build a procedure
11336 linkage table, and we can just do a PC24 reloc instead. */
11337 h->plt.offset = (bfd_vma) -1;
11338 eh->plt_thumb_refcount = 0;
11339 eh->plt_maybe_thumb_refcount = 0;
11340 h->needs_plt = 0;
11341 }
11342
11343 return TRUE;
11344 }
11345 else
11346 {
11347 /* It's possible that we incorrectly decided a .plt reloc was
11348 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11349 in check_relocs. We can't decide accurately between function
11350 and non-function syms in check-relocs; Objects loaded later in
11351 	 and non-function syms in check_relocs; objects loaded later in
11352 h->plt.offset = (bfd_vma) -1;
11353 eh->plt_thumb_refcount = 0;
11354 eh->plt_maybe_thumb_refcount = 0;
11355 }
11356
11357 /* If this is a weak symbol, and there is a real definition, the
11358 processor independent code will have arranged for us to see the
11359 real definition first, and we can just use the same value. */
11360 if (h->u.weakdef != NULL)
11361 {
11362 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11363 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11364 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11365 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11366 return TRUE;
11367 }
11368
11369 /* If there are no non-GOT references, we do not need a copy
11370 relocation. */
11371 if (!h->non_got_ref)
11372 return TRUE;
11373
11374 /* This is a reference to a symbol defined by a dynamic object which
11375 is not a function. */
11376
11377 /* If we are creating a shared library, we must presume that the
11378 only references to the symbol are via the global offset table.
11379 For such cases we need not do anything here; the relocations will
11380 be handled correctly by relocate_section. Relocatable executables
11381 can reference data in shared objects directly, so we don't need to
11382 do anything here. */
11383 if (info->shared || globals->root.is_relocatable_executable)
11384 return TRUE;
11385
11386 if (h->size == 0)
11387 {
11388 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11389 h->root.root.string);
11390 return TRUE;
11391 }
11392
11393 /* We must allocate the symbol in our .dynbss section, which will
11394 become part of the .bss section of the executable. There will be
11395 an entry for this symbol in the .dynsym section. The dynamic
11396 object will contain position independent code, so all references
11397 from the dynamic object to this symbol will go through the global
11398 offset table. The dynamic linker will use the .dynsym entry to
11399 determine the address it must put in the global offset table, so
11400 both the dynamic object and the regular object will refer to the
11401 same memory location for the variable. */
11402 s = bfd_get_section_by_name (dynobj, ".dynbss");
11403 BFD_ASSERT (s != NULL);
11404
11405 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11406 copy the initial value out of the dynamic object and into the
11407 runtime process image. We need to remember the offset into the
11408 .rel(a).bss section we are going to use. */
11409 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11410 {
11411 asection *srel;
11412
11413 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11414 BFD_ASSERT (srel != NULL);
11415 srel->size += RELOC_SIZE (globals);
11416 h->needs_copy = 1;
11417 }
11418
11419 return _bfd_elf_adjust_dynamic_copy (h, s);
11420 }
11421
11422 /* Allocate space in .plt, .got and associated reloc sections for
11423 dynamic relocs. */
11424
11425 static bfd_boolean
11426 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11427 {
11428 struct bfd_link_info *info;
11429 struct elf32_arm_link_hash_table *htab;
11430 struct elf32_arm_link_hash_entry *eh;
11431 struct elf32_arm_relocs_copied *p;
11432 bfd_signed_vma thumb_refs;
11433
11434 eh = (struct elf32_arm_link_hash_entry *) h;
11435
11436 if (h->root.type == bfd_link_hash_indirect)
11437 return TRUE;
11438
11439 if (h->root.type == bfd_link_hash_warning)
11440 /* When warning symbols are created, they **replace** the "real"
11441 entry in the hash table, thus we never get to see the real
11442 symbol in a hash traversal. So look at it now. */
11443 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11444
11445 info = (struct bfd_link_info *) inf;
11446 htab = elf32_arm_hash_table (info);
11447 if (htab == NULL)
11448 return FALSE;
11449
11450 if (htab->root.dynamic_sections_created
11451 && h->plt.refcount > 0)
11452 {
11453 /* Make sure this symbol is output as a dynamic symbol.
11454 Undefined weak syms won't yet be marked as dynamic. */
11455 if (h->dynindx == -1
11456 && !h->forced_local)
11457 {
11458 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11459 return FALSE;
11460 }
11461
11462 if (info->shared
11463 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11464 {
11465 asection *s = htab->splt;
11466
11467 /* If this is the first .plt entry, make room for the special
11468 first entry. */
11469 if (s->size == 0)
11470 s->size += htab->plt_header_size;
11471
11472 h->plt.offset = s->size;
11473
11474 /* If we will insert a Thumb trampoline before this PLT, leave room
11475 for it. */
11476 thumb_refs = eh->plt_thumb_refcount;
11477 if (!htab->use_blx)
11478 thumb_refs += eh->plt_maybe_thumb_refcount;
11479
11480 if (thumb_refs > 0)
11481 {
11482 h->plt.offset += PLT_THUMB_STUB_SIZE;
11483 s->size += PLT_THUMB_STUB_SIZE;
11484 }
11485
11486 /* If this symbol is not defined in a regular file, and we are
11487 not generating a shared library, then set the symbol to this
11488 location in the .plt. This is required to make function
11489 pointers compare as equal between the normal executable and
11490 the shared library. */
11491 if (! info->shared
11492 && !h->def_regular)
11493 {
11494 h->root.u.def.section = s;
11495 h->root.u.def.value = h->plt.offset;
11496
11497 /* Make sure the function is not marked as Thumb, in case
11498 it is the target of an ABS32 relocation, which will
11499 point to the PLT entry. */
11500 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11501 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11502 }
11503
11504 /* Make room for this entry. */
11505 s->size += htab->plt_entry_size;
11506
11507 if (!htab->symbian_p)
11508 {
11509 /* We also need to make an entry in the .got.plt section, which
11510 will be placed in the .got section by the linker script. */
11511 eh->plt_got_offset = htab->sgotplt->size;
11512 htab->sgotplt->size += 4;
11513 }
11514
11515 /* We also need to make an entry in the .rel(a).plt section. */
11516 htab->srelplt->size += RELOC_SIZE (htab);
11517
11518 /* VxWorks executables have a second set of relocations for
11519 each PLT entry. They go in a separate relocation section,
11520 which is processed by the kernel loader. */
11521 if (htab->vxworks_p && !info->shared)
11522 {
11523 /* There is a relocation for the initial PLT entry:
11524 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11525 if (h->plt.offset == htab->plt_header_size)
11526 htab->srelplt2->size += RELOC_SIZE (htab);
11527
11528 /* There are two extra relocations for each subsequent
11529 PLT entry: an R_ARM_32 relocation for the GOT entry,
11530 and an R_ARM_32 relocation for the PLT entry. */
11531 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11532 }
11533 }
11534 else
11535 {
11536 h->plt.offset = (bfd_vma) -1;
11537 h->needs_plt = 0;
11538 }
11539 }
11540 else
11541 {
11542 h->plt.offset = (bfd_vma) -1;
11543 h->needs_plt = 0;
11544 }
11545
11546 if (h->got.refcount > 0)
11547 {
11548 asection *s;
11549 bfd_boolean dyn;
11550 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11551 int indx;
11552
11553 /* Make sure this symbol is output as a dynamic symbol.
11554 Undefined weak syms won't yet be marked as dynamic. */
11555 if (h->dynindx == -1
11556 && !h->forced_local)
11557 {
11558 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11559 return FALSE;
11560 }
11561
11562 if (!htab->symbian_p)
11563 {
11564 s = htab->sgot;
11565 h->got.offset = s->size;
11566
11567 if (tls_type == GOT_UNKNOWN)
11568 abort ();
11569
11570 if (tls_type == GOT_NORMAL)
11571 /* Non-TLS symbols need one GOT slot. */
11572 s->size += 4;
11573 else
11574 {
11575 if (tls_type & GOT_TLS_GD)
11576 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11577 s->size += 8;
11578 if (tls_type & GOT_TLS_IE)
11579 /* R_ARM_TLS_IE32 needs one GOT slot. */
11580 s->size += 4;
11581 }
11582
11583 dyn = htab->root.dynamic_sections_created;
11584
11585 indx = 0;
11586 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11587 && (!info->shared
11588 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11589 indx = h->dynindx;
11590
11591 if (tls_type != GOT_NORMAL
11592 && (info->shared || indx != 0)
11593 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11594 || h->root.type != bfd_link_hash_undefweak))
11595 {
11596 if (tls_type & GOT_TLS_IE)
11597 htab->srelgot->size += RELOC_SIZE (htab);
11598
11599 if (tls_type & GOT_TLS_GD)
11600 htab->srelgot->size += RELOC_SIZE (htab);
11601
11602 if ((tls_type & GOT_TLS_GD) && indx != 0)
11603 htab->srelgot->size += RELOC_SIZE (htab);
11604 }
11605 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11606 || h->root.type != bfd_link_hash_undefweak)
11607 && (info->shared
11608 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11609 htab->srelgot->size += RELOC_SIZE (htab);
11610 }
11611 }
11612 else
11613 h->got.offset = (bfd_vma) -1;
11614
11615 /* Allocate stubs for exported Thumb functions on v4t. */
11616 if (!htab->use_blx && h->dynindx != -1
11617 && h->def_regular
11618 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11619 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11620 {
11621 struct elf_link_hash_entry * th;
11622 struct bfd_link_hash_entry * bh;
11623 struct elf_link_hash_entry * myh;
11624 char name[1024];
11625 asection *s;
11626 bh = NULL;
11627 	  /* Create a new symbol to register the real location of the function. */
11628 s = h->root.u.def.section;
11629 sprintf (name, "__real_%s", h->root.root.string);
11630 _bfd_generic_link_add_one_symbol (info, s->owner,
11631 name, BSF_GLOBAL, s,
11632 h->root.u.def.value,
11633 NULL, TRUE, FALSE, &bh);
11634
11635 myh = (struct elf_link_hash_entry *) bh;
11636 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11637 myh->forced_local = 1;
11638 eh->export_glue = myh;
11639 th = record_arm_to_thumb_glue (info, h);
11640 /* Point the symbol at the stub. */
11641 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11642 h->root.u.def.section = th->root.u.def.section;
11643 h->root.u.def.value = th->root.u.def.value & ~1;
11644 }
11645
11646 if (eh->relocs_copied == NULL)
11647 return TRUE;
11648
11649 /* In the shared -Bsymbolic case, discard space allocated for
11650 dynamic pc-relative relocs against symbols which turn out to be
11651 defined in regular objects. For the normal shared case, discard
11652 space for pc-relative relocs that have become local due to symbol
11653 visibility changes. */
11654
11655 if (info->shared || htab->root.is_relocatable_executable)
11656 {
11657 /* The only relocs that use pc_count are R_ARM_REL32 and
11658 R_ARM_REL32_NOI, which will appear on something like
11659 ".long foo - .". We want calls to protected symbols to resolve
11660 directly to the function rather than going via the plt. If people
11661 want function pointer comparisons to work as expected then they
11662 should avoid writing assembly like ".long foo - .". */
11663 if (SYMBOL_CALLS_LOCAL (info, h))
11664 {
11665 struct elf32_arm_relocs_copied **pp;
11666
11667 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11668 {
11669 p->count -= p->pc_count;
11670 p->pc_count = 0;
11671 if (p->count == 0)
11672 *pp = p->next;
11673 else
11674 pp = &p->next;
11675 }
11676 }
11677
11678 if (htab->vxworks_p)
11679 {
11680 struct elf32_arm_relocs_copied **pp;
11681
11682 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11683 {
11684 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11685 *pp = p->next;
11686 else
11687 pp = &p->next;
11688 }
11689 }
11690
11691 /* Also discard relocs on undefined weak syms with non-default
11692 visibility. */
11693 if (eh->relocs_copied != NULL
11694 && h->root.type == bfd_link_hash_undefweak)
11695 {
11696 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11697 eh->relocs_copied = NULL;
11698
11699 	     /* Make sure undefined weak symbols are output as dynamic
11700 		symbols in PIEs. */
11701 else if (h->dynindx == -1
11702 && !h->forced_local)
11703 {
11704 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11705 return FALSE;
11706 }
11707 }
11708
11709 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11710 && h->root.type == bfd_link_hash_new)
11711 {
11712 /* Output absolute symbols so that we can create relocations
11713 against them. For normal symbols we output a relocation
11714 against the section that contains them. */
11715 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11716 return FALSE;
11717 }
11718
11719 }
11720 else
11721 {
11722 /* For the non-shared case, discard space for relocs against
11723 symbols which turn out to need copy relocs or are not
11724 dynamic. */
11725
11726 if (!h->non_got_ref
11727 && ((h->def_dynamic
11728 && !h->def_regular)
11729 || (htab->root.dynamic_sections_created
11730 && (h->root.type == bfd_link_hash_undefweak
11731 || h->root.type == bfd_link_hash_undefined))))
11732 {
11733 /* Make sure this symbol is output as a dynamic symbol.
11734 Undefined weak syms won't yet be marked as dynamic. */
11735 if (h->dynindx == -1
11736 && !h->forced_local)
11737 {
11738 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11739 return FALSE;
11740 }
11741
11742 /* If that succeeded, we know we'll be keeping all the
11743 relocs. */
11744 if (h->dynindx != -1)
11745 goto keep;
11746 }
11747
11748 eh->relocs_copied = NULL;
11749
11750 keep: ;
11751 }
11752
11753 /* Finally, allocate space. */
11754 for (p = eh->relocs_copied; p != NULL; p = p->next)
11755 {
11756 asection *sreloc = elf_section_data (p->section)->sreloc;
11757 sreloc->size += p->count * RELOC_SIZE (htab);
11758 }
11759
11760 return TRUE;
11761 }
11762
11763 /* Find any dynamic relocs that apply to read-only sections. */
11764
11765 static bfd_boolean
11766 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11767 {
11768 struct elf32_arm_link_hash_entry * eh;
11769 struct elf32_arm_relocs_copied * p;
11770
11771 if (h->root.type == bfd_link_hash_warning)
11772 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11773
11774 eh = (struct elf32_arm_link_hash_entry *) h;
11775 for (p = eh->relocs_copied; p != NULL; p = p->next)
11776 {
11777 asection *s = p->section;
11778
11779 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11780 {
11781 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11782
11783 info->flags |= DF_TEXTREL;
11784
11785 /* Not an error, just cut short the traversal. */
11786 return FALSE;
11787 }
11788 }
11789 return TRUE;
11790 }
11791
11792 void
11793 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11794 int byteswap_code)
11795 {
11796 struct elf32_arm_link_hash_table *globals;
11797
11798 globals = elf32_arm_hash_table (info);
11799 if (globals == NULL)
11800 return;
11801
11802 globals->byteswap_code = byteswap_code;
11803 }
11804
11805 /* Set the sizes of the dynamic sections. */
11806
11807 static bfd_boolean
11808 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11809 struct bfd_link_info * info)
11810 {
11811 bfd * dynobj;
11812 asection * s;
11813 bfd_boolean plt;
11814 bfd_boolean relocs;
11815 bfd *ibfd;
11816 struct elf32_arm_link_hash_table *htab;
11817
11818 htab = elf32_arm_hash_table (info);
11819 if (htab == NULL)
11820 return FALSE;
11821
11822 dynobj = elf_hash_table (info)->dynobj;
11823 BFD_ASSERT (dynobj != NULL);
11824 check_use_blx (htab);
11825
11826 if (elf_hash_table (info)->dynamic_sections_created)
11827 {
11828 /* Set the contents of the .interp section to the interpreter. */
11829 if (info->executable)
11830 {
11831 s = bfd_get_section_by_name (dynobj, ".interp");
11832 BFD_ASSERT (s != NULL);
11833 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11834 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11835 }
11836 }
11837
11838 /* Set up .got offsets for local syms, and space for local dynamic
11839 relocs. */
11840 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11841 {
11842 bfd_signed_vma *local_got;
11843 bfd_signed_vma *end_local_got;
11844 char *local_tls_type;
11845 bfd_size_type locsymcount;
11846 Elf_Internal_Shdr *symtab_hdr;
11847 asection *srel;
11848 bfd_boolean is_vxworks = htab->vxworks_p;
11849
11850 if (! is_arm_elf (ibfd))
11851 continue;
11852
11853 for (s = ibfd->sections; s != NULL; s = s->next)
11854 {
11855 struct elf32_arm_relocs_copied *p;
11856
11857 for (p = (struct elf32_arm_relocs_copied *)
11858 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11859 {
11860 if (!bfd_is_abs_section (p->section)
11861 && bfd_is_abs_section (p->section->output_section))
11862 {
11863 /* Input section has been discarded, either because
11864 it is a copy of a linkonce section or due to
11865 linker script /DISCARD/, so we'll be discarding
11866 the relocs too. */
11867 }
11868 else if (is_vxworks
11869 && strcmp (p->section->output_section->name,
11870 ".tls_vars") == 0)
11871 {
11872 /* Relocations in vxworks .tls_vars sections are
11873 handled specially by the loader. */
11874 }
11875 else if (p->count != 0)
11876 {
11877 srel = elf_section_data (p->section)->sreloc;
11878 srel->size += p->count * RELOC_SIZE (htab);
11879 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11880 info->flags |= DF_TEXTREL;
11881 }
11882 }
11883 }
11884
11885 local_got = elf_local_got_refcounts (ibfd);
11886 if (!local_got)
11887 continue;
11888
11889 symtab_hdr = & elf_symtab_hdr (ibfd);
11890 locsymcount = symtab_hdr->sh_info;
11891 end_local_got = local_got + locsymcount;
11892 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11893 s = htab->sgot;
11894 srel = htab->srelgot;
11895 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11896 {
11897 if (*local_got > 0)
11898 {
11899 *local_got = s->size;
11900 if (*local_tls_type & GOT_TLS_GD)
11901 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11902 s->size += 8;
11903 if (*local_tls_type & GOT_TLS_IE)
11904 s->size += 4;
11905 if (*local_tls_type == GOT_NORMAL)
11906 s->size += 4;
11907
11908 if (info->shared || *local_tls_type == GOT_TLS_GD)
11909 srel->size += RELOC_SIZE (htab);
11910 }
11911 else
11912 *local_got = (bfd_vma) -1;
11913 }
11914 }
11915
11916 if (htab->tls_ldm_got.refcount > 0)
11917 {
11918 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11919 for R_ARM_TLS_LDM32 relocations. */
11920 htab->tls_ldm_got.offset = htab->sgot->size;
11921 htab->sgot->size += 8;
11922 if (info->shared)
11923 htab->srelgot->size += RELOC_SIZE (htab);
11924 }
11925 else
11926 htab->tls_ldm_got.offset = -1;
11927
11928 /* Allocate global sym .plt and .got entries, and space for global
11929 sym dynamic relocs. */
11930 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11931
11932 /* Here we rummage through the found bfds to collect glue information. */
11933 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11934 {
11935 if (! is_arm_elf (ibfd))
11936 continue;
11937
11938 /* Initialise mapping tables for code/data. */
11939 bfd_elf32_arm_init_maps (ibfd);
11940
11941 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11942 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11943 /* xgettext:c-format */
11944 _bfd_error_handler (_("Errors encountered processing file %s"),
11945 ibfd->filename);
11946 }
11947
11948 /* Allocate space for the glue sections now that we've sized them. */
11949 bfd_elf32_arm_allocate_interworking_sections (info);
11950
11951 /* The check_relocs and adjust_dynamic_symbol entry points have
11952 determined the sizes of the various dynamic sections. Allocate
11953 memory for them. */
11954 plt = FALSE;
11955 relocs = FALSE;
11956 for (s = dynobj->sections; s != NULL; s = s->next)
11957 {
11958 const char * name;
11959
11960 if ((s->flags & SEC_LINKER_CREATED) == 0)
11961 continue;
11962
11963 /* It's OK to base decisions on the section name, because none
11964 of the dynobj section names depend upon the input files. */
11965 name = bfd_get_section_name (dynobj, s);
11966
11967 if (strcmp (name, ".plt") == 0)
11968 {
11969 /* Remember whether there is a PLT. */
11970 plt = s->size != 0;
11971 }
11972 else if (CONST_STRNEQ (name, ".rel"))
11973 {
11974 if (s->size != 0)
11975 {
11976 /* Remember whether there are any reloc sections other
11977 than .rel(a).plt and .rela.plt.unloaded. */
11978 if (s != htab->srelplt && s != htab->srelplt2)
11979 relocs = TRUE;
11980
11981 /* We use the reloc_count field as a counter if we need
11982 to copy relocs into the output file. */
11983 s->reloc_count = 0;
11984 }
11985 }
11986 else if (! CONST_STRNEQ (name, ".got")
11987 && strcmp (name, ".dynbss") != 0)
11988 {
11989 /* It's not one of our sections, so don't allocate space. */
11990 continue;
11991 }
11992
11993 if (s->size == 0)
11994 {
11995 /* If we don't need this section, strip it from the
11996 output file. This is mostly to handle .rel(a).bss and
11997 .rel(a).plt. We must create both sections in
11998 create_dynamic_sections, because they must be created
11999 before the linker maps input sections to output
12000 sections. The linker does that before
12001 adjust_dynamic_symbol is called, and it is that
12002 function which decides whether anything needs to go
12003 into these sections. */
12004 s->flags |= SEC_EXCLUDE;
12005 continue;
12006 }
12007
12008 if ((s->flags & SEC_HAS_CONTENTS) == 0)
12009 continue;
12010
12011 /* Allocate memory for the section contents. */
12012 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
12013 if (s->contents == NULL)
12014 return FALSE;
12015 }
12016
12017 if (elf_hash_table (info)->dynamic_sections_created)
12018 {
12019 /* Add some entries to the .dynamic section. We fill in the
12020 values later, in elf32_arm_finish_dynamic_sections, but we
12021 must add the entries now so that we get the correct size for
12022 the .dynamic section. The DT_DEBUG entry is filled in by the
12023 dynamic linker and used by the debugger. */
12024 #define add_dynamic_entry(TAG, VAL) \
12025 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
12026
12027 if (info->executable)
12028 {
12029 if (!add_dynamic_entry (DT_DEBUG, 0))
12030 return FALSE;
12031 }
12032
12033 if (plt)
12034 {
12035 if ( !add_dynamic_entry (DT_PLTGOT, 0)
12036 || !add_dynamic_entry (DT_PLTRELSZ, 0)
12037 || !add_dynamic_entry (DT_PLTREL,
12038 htab->use_rel ? DT_REL : DT_RELA)
12039 || !add_dynamic_entry (DT_JMPREL, 0))
12040 return FALSE;
12041 }
12042
12043 if (relocs)
12044 {
12045 if (htab->use_rel)
12046 {
12047 if (!add_dynamic_entry (DT_REL, 0)
12048 || !add_dynamic_entry (DT_RELSZ, 0)
12049 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
12050 return FALSE;
12051 }
12052 else
12053 {
12054 if (!add_dynamic_entry (DT_RELA, 0)
12055 || !add_dynamic_entry (DT_RELASZ, 0)
12056 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
12057 return FALSE;
12058 }
12059 }
12060
12061 /* If any dynamic relocs apply to a read-only section,
12062 then we need a DT_TEXTREL entry. */
12063 if ((info->flags & DF_TEXTREL) == 0)
12064 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
12065 info);
12066
12067 if ((info->flags & DF_TEXTREL) != 0)
12068 {
12069 if (!add_dynamic_entry (DT_TEXTREL, 0))
12070 return FALSE;
12071 }
12072 if (htab->vxworks_p
12073 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
12074 return FALSE;
12075 }
12076 #undef add_dynamic_entry
12077
12078 return TRUE;
12079 }
12080
12081 /* Finish up dynamic symbol handling. We set the contents of various
12082 dynamic sections here. */
12083
12084 static bfd_boolean
12085 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
12086 struct bfd_link_info * info,
12087 struct elf_link_hash_entry * h,
12088 Elf_Internal_Sym * sym)
12089 {
12090 bfd * dynobj;
12091 struct elf32_arm_link_hash_table *htab;
12092 struct elf32_arm_link_hash_entry *eh;
12093
12094 dynobj = elf_hash_table (info)->dynobj;
12095 htab = elf32_arm_hash_table (info);
12096 if (htab == NULL)
12097 return FALSE;
12098
12099 eh = (struct elf32_arm_link_hash_entry *) h;
12100
12101 if (h->plt.offset != (bfd_vma) -1)
12102 {
12103 asection * splt;
12104 asection * srel;
12105 bfd_byte *loc;
12106 bfd_vma plt_index;
12107 Elf_Internal_Rela rel;
12108
12109 /* This symbol has an entry in the procedure linkage table. Set
12110 it up. */
12111
12112 BFD_ASSERT (h->dynindx != -1);
12113
12114 splt = bfd_get_section_by_name (dynobj, ".plt");
12115 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
12116 BFD_ASSERT (splt != NULL && srel != NULL);
12117
12118 /* Fill in the entry in the procedure linkage table. */
12119 if (htab->symbian_p)
12120 {
12121 put_arm_insn (htab, output_bfd,
12122 elf32_arm_symbian_plt_entry[0],
12123 splt->contents + h->plt.offset);
12124 bfd_put_32 (output_bfd,
12125 elf32_arm_symbian_plt_entry[1],
12126 splt->contents + h->plt.offset + 4);
12127
12128 /* Fill in the entry in the .rel.plt section. */
12129 rel.r_offset = (splt->output_section->vma
12130 + splt->output_offset
12131 + h->plt.offset + 4);
12132 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12133
12134 /* Get the index in the procedure linkage table which
12135 corresponds to this symbol. This is the index of this symbol
12136 in all the symbols for which we are making plt entries. The
12137 first entry in the procedure linkage table is reserved. */
12138 plt_index = ((h->plt.offset - htab->plt_header_size)
12139 / htab->plt_entry_size);
12140 }
12141 else
12142 {
12143 bfd_vma got_offset, got_address, plt_address;
12144 bfd_vma got_displacement;
12145 asection * sgot;
12146 bfd_byte * ptr;
12147
12148 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12149 BFD_ASSERT (sgot != NULL);
12150
12151 /* Get the offset into the .got.plt table of the entry that
12152 corresponds to this function. */
12153 got_offset = eh->plt_got_offset;
12154
12155 /* Get the index in the procedure linkage table which
12156 corresponds to this symbol. This is the index of this symbol
12157 in all the symbols for which we are making plt entries. The
12158 first three entries in .got.plt are reserved; after that
12159 symbols appear in the same order as in .plt. */
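	  /* The 12 skipped below covers the three reserved words (4 bytes
	     each) at the start of .got.plt; each later entry is 4 bytes.  */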
12160 plt_index = (got_offset - 12) / 4;
12161
12162 /* Calculate the address of the GOT entry. */
12163 got_address = (sgot->output_section->vma
12164 + sgot->output_offset
12165 + got_offset);
12166
12167 /* ...and the address of the PLT entry. */
12168 plt_address = (splt->output_section->vma
12169 + splt->output_offset
12170 + h->plt.offset);
12171
12172 ptr = htab->splt->contents + h->plt.offset;
12173 if (htab->vxworks_p && info->shared)
12174 {
12175 unsigned int i;
12176 bfd_vma val;
12177
12178 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12179 {
12180 val = elf32_arm_vxworks_shared_plt_entry[i];
12181 if (i == 2)
12182 val |= got_address - sgot->output_section->vma;
12183 if (i == 5)
12184 val |= plt_index * RELOC_SIZE (htab);
12185 if (i == 2 || i == 5)
12186 bfd_put_32 (output_bfd, val, ptr);
12187 else
12188 put_arm_insn (htab, output_bfd, val, ptr);
12189 }
12190 }
12191 else if (htab->vxworks_p)
12192 {
12193 unsigned int i;
12194 bfd_vma val;
12195
12196 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12197 {
12198 val = elf32_arm_vxworks_exec_plt_entry[i];
12199 if (i == 2)
12200 val |= got_address;
12201 if (i == 4)
12202 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12203 if (i == 5)
12204 val |= plt_index * RELOC_SIZE (htab);
12205 if (i == 2 || i == 5)
12206 bfd_put_32 (output_bfd, val, ptr);
12207 else
12208 put_arm_insn (htab, output_bfd, val, ptr);
12209 }
12210
12211 loc = (htab->srelplt2->contents
12212 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12213
12214 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12215 referencing the GOT for this PLT entry. */
12216 rel.r_offset = plt_address + 8;
12217 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12218 rel.r_addend = got_offset;
12219 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12220 loc += RELOC_SIZE (htab);
12221
12222 /* Create the R_ARM_ABS32 relocation referencing the
12223 beginning of the PLT for this GOT entry. */
12224 rel.r_offset = got_address;
12225 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12226 rel.r_addend = 0;
12227 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12228 }
12229 else
12230 {
12231 bfd_signed_vma thumb_refs;
12232 /* Calculate the displacement between the PLT slot and the
12233 entry in the GOT. The eight-byte offset accounts for the
12234 value produced by adding to pc in the first instruction
12235 of the PLT stub. */
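	      /* In ARM state the PC reads as the current instruction's
		 address plus 8, which is where that adjustment comes from.  */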
12236 got_displacement = got_address - (plt_address + 8);
12237
12238 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12239
12240 thumb_refs = eh->plt_thumb_refcount;
12241 if (!htab->use_blx)
12242 thumb_refs += eh->plt_maybe_thumb_refcount;
12243
12244 if (thumb_refs > 0)
12245 {
12246 put_thumb_insn (htab, output_bfd,
12247 elf32_arm_plt_thumb_stub[0], ptr - 4);
12248 put_thumb_insn (htab, output_bfd,
12249 elf32_arm_plt_thumb_stub[1], ptr - 2);
12250 }
12251
12252 put_arm_insn (htab, output_bfd,
12253 elf32_arm_plt_entry[0]
12254 | ((got_displacement & 0x0ff00000) >> 20),
12255 ptr + 0);
12256 put_arm_insn (htab, output_bfd,
12257 elf32_arm_plt_entry[1]
12258 | ((got_displacement & 0x000ff000) >> 12),
12259 			    ptr + 4);
12260 put_arm_insn (htab, output_bfd,
12261 elf32_arm_plt_entry[2]
12262 | (got_displacement & 0x00000fff),
12263 ptr + 8);
12264 #ifdef FOUR_WORD_PLT
12265 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12266 #endif
12267 }
12268
12269 /* Fill in the entry in the global offset table. */
12270 bfd_put_32 (output_bfd,
12271 (splt->output_section->vma
12272 + splt->output_offset),
12273 sgot->contents + got_offset);
12274
12275 /* Fill in the entry in the .rel(a).plt section. */
12276 rel.r_addend = 0;
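/* Find inlined-function information (the location an inlined call came
   from) using the DWARF 2 data gathered by the lookups above.  */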
12277 rel.r_offset = got_address;
12278 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12279 }
12280
12281 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12282 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12283
12284 if (!h->def_regular)
12285 {
12286 /* Mark the symbol as undefined, rather than as defined in
12287 the .plt section. Leave the value alone. */
12288 sym->st_shndx = SHN_UNDEF;
12289 /* If the symbol is weak, we do need to clear the value.
12290 Otherwise, the PLT entry would provide a definition for
12291 the symbol even if the symbol wasn't defined anywhere,
12292 and so the symbol would never be NULL. */
12293 if (!h->ref_regular_nonweak)
12294 sym->st_value = 0;
12295 }
12296 }
12297
12298 if (h->got.offset != (bfd_vma) -1
12299 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12300 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12301 {
12302 asection * sgot;
12303 asection * srel;
12304 Elf_Internal_Rela rel;
12305 bfd_byte *loc;
12306 bfd_vma offset;
12307
12308 /* This symbol has an entry in the global offset table. Set it
12309 up. */
12310 sgot = bfd_get_section_by_name (dynobj, ".got");
12311 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12312 BFD_ASSERT (sgot != NULL && srel != NULL);
12313
12314 offset = (h->got.offset & ~(bfd_vma) 1);
12315 rel.r_addend = 0;
12316 rel.r_offset = (sgot->output_section->vma
12317 + sgot->output_offset
12318 + offset);
12319
12320 /* If this is a static link, or it is a -Bsymbolic link and the
12321 symbol is defined locally or was forced to be local because
12322 of a version file, we just want to emit a RELATIVE reloc.
12323 The entry in the global offset table will already have been
12324 initialized in the relocate_section function. */
12325 if (info->shared
12326 && SYMBOL_REFERENCES_LOCAL (info, h))
12327 {
12328 BFD_ASSERT ((h->got.offset & 1) != 0);
12329 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12330 if (!htab->use_rel)
12331 {
12332 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12333 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12334 }
12335 }
12336 else
12337 {
12338 BFD_ASSERT ((h->got.offset & 1) == 0);
12339 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12340 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12341 }
12342
12343 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12344 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12345 }
12346
12347 if (h->needs_copy)
12348 {
12349 asection * s;
12350 Elf_Internal_Rela rel;
12351 bfd_byte *loc;
12352
12353 /* This symbol needs a copy reloc. Set it up. */
12354 BFD_ASSERT (h->dynindx != -1
12355 && (h->root.type == bfd_link_hash_defined
12356 || h->root.type == bfd_link_hash_defweak));
12357
12358 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12359 RELOC_SECTION (htab, ".bss"));
12360 BFD_ASSERT (s != NULL);
12361
12362 rel.r_addend = 0;
12363 rel.r_offset = (h->root.u.def.value
12364 + h->root.u.def.section->output_section->vma
12365 + h->root.u.def.section->output_offset);
12366 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12367 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12368 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12369 }
12370
12371 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12372 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12373 to the ".got" section. */
12374 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12375 || (!htab->vxworks_p && h == htab->root.hgot))
12376 sym->st_shndx = SHN_ABS;
12377
12378 return TRUE;
12379 }
12380
12381 /* Finish up the dynamic sections. */
12382
12383 static bfd_boolean
12384 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12385 {
12386 bfd * dynobj;
12387 asection * sgot;
12388 asection * sdyn;
12389 struct elf32_arm_link_hash_table *htab;
12390
12391 htab = elf32_arm_hash_table (info);
12392 if (htab == NULL)
12393 return FALSE;
12394
12395 dynobj = elf_hash_table (info)->dynobj;
12396
12397 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12398 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12399 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12400
12401 if (elf_hash_table (info)->dynamic_sections_created)
12402 {
12403 asection *splt;
12404 Elf32_External_Dyn *dyncon, *dynconend;
12405
12406 splt = bfd_get_section_by_name (dynobj, ".plt");
12407 BFD_ASSERT (splt != NULL && sdyn != NULL);
12408
12409 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12410 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12411
12412 for (; dyncon < dynconend; dyncon++)
12413 {
12414 Elf_Internal_Dyn dyn;
12415 const char * name;
12416 asection * s;
12417
12418 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12419
12420 switch (dyn.d_tag)
12421 {
12422 unsigned int type;
12423
12424 default:
12425 if (htab->vxworks_p
12426 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12427 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12428 break;
12429
12430 case DT_HASH:
12431 name = ".hash";
12432 goto get_vma_if_bpabi;
12433 case DT_STRTAB:
12434 name = ".dynstr";
12435 goto get_vma_if_bpabi;
12436 case DT_SYMTAB:
12437 name = ".dynsym";
12438 goto get_vma_if_bpabi;
12439 case DT_VERSYM:
12440 name = ".gnu.version";
12441 goto get_vma_if_bpabi;
12442 case DT_VERDEF:
12443 name = ".gnu.version_d";
12444 goto get_vma_if_bpabi;
12445 case DT_VERNEED:
12446 name = ".gnu.version_r";
12447 goto get_vma_if_bpabi;
12448
12449 case DT_PLTGOT:
12450 name = ".got";
12451 goto get_vma;
12452 case DT_JMPREL:
12453 name = RELOC_SECTION (htab, ".plt");
12454 get_vma:
12455 s = bfd_get_section_by_name (output_bfd, name);
12456 BFD_ASSERT (s != NULL);
12457 if (!htab->symbian_p)
12458 dyn.d_un.d_ptr = s->vma;
12459 else
12460 /* In the BPABI, tags in the PT_DYNAMIC section point
12461 at the file offset, not the memory address, for the
12462 convenience of the post linker. */
12463 dyn.d_un.d_ptr = s->filepos;
12464 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12465 break;
12466
12467 get_vma_if_bpabi:
12468 if (htab->symbian_p)
12469 goto get_vma;
12470 break;
12471
12472 case DT_PLTRELSZ:
12473 s = bfd_get_section_by_name (output_bfd,
12474 RELOC_SECTION (htab, ".plt"));
12475 BFD_ASSERT (s != NULL);
12476 dyn.d_un.d_val = s->size;
12477 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12478 break;
12479
12480 case DT_RELSZ:
12481 case DT_RELASZ:
12482 if (!htab->symbian_p)
12483 {
12484 /* My reading of the SVR4 ABI indicates that the
12485 procedure linkage table relocs (DT_JMPREL) should be
12486 included in the overall relocs (DT_REL). This is
12487 what Solaris does. However, UnixWare can not handle
12488 that case. Therefore, we override the DT_RELSZ entry
12489 here to make it not include the JMPREL relocs. Since
12490 the linker script arranges for .rel(a).plt to follow all
12491 other relocation sections, we don't have to worry
12492 about changing the DT_REL entry. */
12493 s = bfd_get_section_by_name (output_bfd,
12494 RELOC_SECTION (htab, ".plt"));
12495 if (s != NULL)
12496 dyn.d_un.d_val -= s->size;
12497 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12498 break;
12499 }
12500 /* Fall through. */
12501
12502 case DT_REL:
12503 case DT_RELA:
12504 /* In the BPABI, the DT_REL tag must point at the file
12505 offset, not the VMA, of the first relocation
12506 section. So, we use code similar to that in
12507 elflink.c, but do not check for SHF_ALLOC on the
12508 		 relocation section, since relocation sections are
12509 never allocated under the BPABI. The comments above
12510 		 about UnixWare notwithstanding, we include all of the
12511 relocations here. */
12512 if (htab->symbian_p)
12513 {
12514 unsigned int i;
12515 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12516 ? SHT_REL : SHT_RELA);
12517 dyn.d_un.d_val = 0;
12518 for (i = 1; i < elf_numsections (output_bfd); i++)
12519 {
12520 Elf_Internal_Shdr *hdr
12521 = elf_elfsections (output_bfd)[i];
12522 if (hdr->sh_type == type)
12523 {
12524 if (dyn.d_tag == DT_RELSZ
12525 || dyn.d_tag == DT_RELASZ)
12526 dyn.d_un.d_val += hdr->sh_size;
12527 else if ((ufile_ptr) hdr->sh_offset
12528 <= dyn.d_un.d_val - 1)
12529 dyn.d_un.d_val = hdr->sh_offset;
12530 }
12531 }
12532 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12533 }
12534 break;
12535
12536 /* Set the bottom bit of DT_INIT/FINI if the
12537 corresponding function is Thumb. */
12538 case DT_INIT:
12539 name = info->init_function;
12540 goto get_sym;
12541 case DT_FINI:
12542 name = info->fini_function;
12543 get_sym:
12544 /* If it wasn't set by elf_bfd_final_link
12545 then there is nothing to adjust. */
12546 if (dyn.d_un.d_val != 0)
12547 {
12548 struct elf_link_hash_entry * eh;
12549
12550 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12551 FALSE, FALSE, TRUE);
12552 if (eh != NULL
12553 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12554 {
12555 dyn.d_un.d_val |= 1;
12556 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12557 }
12558 }
12559 break;
12560 }
12561 }
12562
12563 /* Fill in the first entry in the procedure linkage table. */
12564 if (splt->size > 0 && htab->plt_header_size)
12565 {
12566 const bfd_vma *plt0_entry;
12567 bfd_vma got_address, plt_address, got_displacement;
12568
12569 /* Calculate the addresses of the GOT and PLT. */
12570 got_address = sgot->output_section->vma + sgot->output_offset;
12571 plt_address = splt->output_section->vma + splt->output_offset;
12572
12573 if (htab->vxworks_p)
12574 {
12575 /* The VxWorks GOT is relocated by the dynamic linker.
12576 Therefore, we must emit relocations rather than simply
12577 computing the values now. */
12578 Elf_Internal_Rela rel;
12579
12580 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12581 put_arm_insn (htab, output_bfd, plt0_entry[0],
12582 splt->contents + 0);
12583 put_arm_insn (htab, output_bfd, plt0_entry[1],
12584 splt->contents + 4);
12585 put_arm_insn (htab, output_bfd, plt0_entry[2],
12586 splt->contents + 8);
12587 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12588
12589 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12590 rel.r_offset = plt_address + 12;
12591 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12592 rel.r_addend = 0;
12593 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12594 htab->srelplt2->contents);
12595 }
12596 else
12597 {
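	      /* The PLT header applies this displacement to a PC value of
		 plt_address + 16 (the instruction at offset 8 reads PC as
		 its own address plus 8) to reach the start of the GOT.  */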
12598 got_displacement = got_address - (plt_address + 16);
12599
12600 plt0_entry = elf32_arm_plt0_entry;
12601 put_arm_insn (htab, output_bfd, plt0_entry[0],
12602 splt->contents + 0);
12603 put_arm_insn (htab, output_bfd, plt0_entry[1],
12604 splt->contents + 4);
12605 put_arm_insn (htab, output_bfd, plt0_entry[2],
12606 splt->contents + 8);
12607 put_arm_insn (htab, output_bfd, plt0_entry[3],
12608 splt->contents + 12);
12609
12610 #ifdef FOUR_WORD_PLT
12611 /* The displacement value goes in the otherwise-unused
12612 last word of the second entry. */
12613 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12614 #else
12615 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12616 #endif
12617 }
12618 }
12619
12620 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12621 really seem like the right value. */
12622 if (splt->output_section->owner == output_bfd)
12623 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12624
12625 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12626 {
12627 /* Correct the .rel(a).plt.unloaded relocations. They will have
12628 incorrect symbol indexes. */
12629 int num_plts;
12630 unsigned char *p;
12631
12632 num_plts = ((htab->splt->size - htab->plt_header_size)
12633 / htab->plt_entry_size);
12634 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12635
12636 for (; num_plts; num_plts--)
12637 {
12638 Elf_Internal_Rela rel;
12639
12640 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12641 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12642 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12643 p += RELOC_SIZE (htab);
12644
12645 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12646 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12647 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12648 p += RELOC_SIZE (htab);
12649 }
12650 }
12651 }
12652
12653 /* Fill in the first three entries in the global offset table. */
12654 if (sgot)
12655 {
12656 if (sgot->size > 0)
12657 {
12658 if (sdyn == NULL)
12659 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12660 else
12661 bfd_put_32 (output_bfd,
12662 sdyn->output_section->vma + sdyn->output_offset,
12663 sgot->contents);
12664 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12665 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12666 }
12667
12668 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12669 }
12670
12671 return TRUE;
12672 }
12673
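/* Fill in ARM-specific fields of the ELF file header: the OS/ABI byte,
   the ABI version, and the BE8 flag when byteswapped code was requested.  */
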
12674 static void
12675 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12676 {
12677 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12678 struct elf32_arm_link_hash_table *globals;
12679
12680 i_ehdrp = elf_elfheader (abfd);
12681
12682 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12683 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12684 else
12685 i_ehdrp->e_ident[EI_OSABI] = 0;
12686 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12687
12688 if (link_info)
12689 {
12690 globals = elf32_arm_hash_table (link_info);
12691 if (globals != NULL && globals->byteswap_code)
12692 i_ehdrp->e_flags |= EF_ARM_BE8;
12693 }
12694 }
12695
12696 static enum elf_reloc_type_class
12697 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12698 {
12699 switch ((int) ELF32_R_TYPE (rela->r_info))
12700 {
12701 case R_ARM_RELATIVE:
12702 return reloc_class_relative;
12703 case R_ARM_JUMP_SLOT:
12704 return reloc_class_plt;
12705 case R_ARM_COPY:
12706 return reloc_class_copy;
12707 default:
12708 return reloc_class_normal;
12709 }
12710 }
12711
12712 /* Mark ARM note sections as link-once so that duplicate notes with identical contents are merged. */
12713
12714 static bfd_boolean
12715 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12716 {
12717 if (hdr->sh_type == SHT_NOTE)
12718 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12719
12720 return TRUE;
12721 }
12722
12723 static void
12724 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12725 {
12726 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12727 }
12728
12729 /* Return TRUE if this is an unwinding table entry. */
12730
12731 static bfd_boolean
12732 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12733 {
12734 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12735 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12736 }
12737
12738
12739 /* Set the type and flags for an ARM section. We do this by
12740 the section name, which is a hack, but ought to work. */
12741
12742 static bfd_boolean
12743 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12744 {
12745 const char * name;
12746
12747 name = bfd_get_section_name (abfd, sec);
12748
12749 if (is_arm_elf_unwind_section_name (abfd, name))
12750 {
12751 hdr->sh_type = SHT_ARM_EXIDX;
12752 hdr->sh_flags |= SHF_LINK_ORDER;
12753 }
12754 return TRUE;
12755 }
12756
12757 /* Handle an ARM specific section when reading an object file. This is
12758 called when bfd_section_from_shdr finds a section with an unknown
12759 type. */
12760
12761 static bfd_boolean
12762 elf32_arm_section_from_shdr (bfd *abfd,
12763 Elf_Internal_Shdr * hdr,
12764 const char *name,
12765 int shindex)
12766 {
12767 /* There ought to be a place to keep ELF backend specific flags, but
12768 at the moment there isn't one. We just keep track of the
12769 sections by their name, instead. Fortunately, the ABI gives
12770 names for all the ARM specific sections, so we will probably get
12771 away with this. */
12772 switch (hdr->sh_type)
12773 {
12774 case SHT_ARM_EXIDX:
12775 case SHT_ARM_PREEMPTMAP:
12776 case SHT_ARM_ATTRIBUTES:
12777 break;
12778
12779 default:
12780 return FALSE;
12781 }
12782
12783 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12784 return FALSE;
12785
12786 return TRUE;
12787 }
12788
12789 /* A structure used to record a list of sections, independently
12790 of the next and prev fields in the asection structure. */
12791 typedef struct section_list
12792 {
12793 asection * sec;
12794 struct section_list * next;
12795 struct section_list * prev;
12796 }
12797 section_list;
12798
12799 /* Unfortunately we need to keep a list of sections for which
12800 an _arm_elf_section_data structure has been allocated. This
12801 is because it is possible for functions like elf32_arm_write_section
12802 to be called on a section which has had an elf_data_structure
12803 allocated for it (and so the used_by_bfd field is valid) but
12804 for which the ARM extended version of this structure - the
12805 _arm_elf_section_data structure - has not been allocated. */
12806 static section_list * sections_with_arm_elf_section_data = NULL;
12807
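/* Add SEC to the list of sections with ARM-specific section data.  */
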
12808 static void
12809 record_section_with_arm_elf_section_data (asection * sec)
12810 {
12811 struct section_list * entry;
12812
12813 entry = (struct section_list *) bfd_malloc (sizeof (* entry));
12814 if (entry == NULL)
12815 return;
12816 entry->sec = sec;
12817 entry->next = sections_with_arm_elf_section_data;
12818 entry->prev = NULL;
12819 if (entry->next != NULL)
12820 entry->next->prev = entry;
12821 sections_with_arm_elf_section_data = entry;
12822 }
12823
12824 static struct section_list *
12825 find_arm_elf_section_entry (asection * sec)
12826 {
12827 struct section_list * entry;
12828 static struct section_list * last_entry = NULL;
12829
12830 /* This is a short cut for the typical case where the sections are added
12831 to the sections_with_arm_elf_section_data list in forward order and
12832 then looked up here in backwards order. This makes a real difference
12833 to the ld-srec/sec64k.exp linker test. */
12834 entry = sections_with_arm_elf_section_data;
12835 if (last_entry != NULL)
12836 {
12837 if (last_entry->sec == sec)
12838 entry = last_entry;
12839 else if (last_entry->next != NULL
12840 && last_entry->next->sec == sec)
12841 entry = last_entry->next;
12842 }
12843
12844 for (; entry; entry = entry->next)
12845 if (entry->sec == sec)
12846 break;
12847
12848 if (entry)
12849 /* Record the entry prior to this one - it is the entry we are most
12850 likely to want to locate next time. Also this way if we have been
12851 called from unrecord_section_with_arm_elf_section_data() we will not
12852 be caching a pointer that is about to be freed. */
12853 last_entry = entry->prev;
12854
12855 return entry;
12856 }
12857
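/* Return the ARM-specific section data recorded for SEC, or NULL if SEC
   is not on the list above.  */
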
12858 static _arm_elf_section_data *
12859 get_arm_elf_section_data (asection * sec)
12860 {
12861 struct section_list * entry;
12862
12863 entry = find_arm_elf_section_entry (sec);
12864
12865 if (entry)
12866 return elf32_arm_section_data (entry->sec);
12867 else
12868 return NULL;
12869 }
12870
12871 static void
12872 unrecord_section_with_arm_elf_section_data (asection * sec)
12873 {
12874 struct section_list * entry;
12875
12876 entry = find_arm_elf_section_entry (sec);
12877
12878 if (entry)
12879 {
12880 if (entry->prev != NULL)
12881 entry->prev->next = entry->next;
12882 if (entry->next != NULL)
12883 entry->next->prev = entry->prev;
12884 if (entry == sections_with_arm_elf_section_data)
12885 sections_with_arm_elf_section_data = entry->next;
12886 free (entry);
12887 }
12888 }
12889
12890
12891 typedef struct
12892 {
12893 void *finfo;
12894 struct bfd_link_info *info;
12895 asection *sec;
12896 int sec_shndx;
12897 int (*func) (void *, const char *, Elf_Internal_Sym *,
12898 asection *, struct elf_link_hash_entry *);
12899 } output_arch_syminfo;
12900
12901 enum map_symbol_type
12902 {
12903 ARM_MAP_ARM,
12904 ARM_MAP_THUMB,
12905 ARM_MAP_DATA
12906 };
12907
12908
12909 /* Output a single mapping symbol. */
12910
12911 static bfd_boolean
12912 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12913 enum map_symbol_type type,
12914 bfd_vma offset)
12915 {
12916 static const char *names[3] = {"$a", "$t", "$d"};
12917 Elf_Internal_Sym sym;
12918
12919 sym.st_value = osi->sec->output_section->vma
12920 + osi->sec->output_offset
12921 + offset;
12922 sym.st_size = 0;
12923 sym.st_other = 0;
12924 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12925 sym.st_shndx = osi->sec_shndx;
12926 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
12927 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12928 }
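
/* A minimal usage sketch for the routine above, assuming OSI has already
   been set up the way elf32_arm_output_arch_local_syms does further down
   (sec, sec_shndx, func and finfo filled in).  The offsets are invented
   purely for illustration.  */
#if 0 /* Illustrative only - not part of the build.  */
static bfd_boolean
example_emit_mapping_syms (output_arch_syminfo *osi)
{
  /* "$a" at offset 0: ARM code starts here.  */
  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, 0))
    return FALSE;

  /* "$d" at offset 0x20: a literal pool (data) follows the code.  */
  if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, 0x20))
    return FALSE;

  /* "$t" at offset 0x30: Thumb code after the literal pool.  */
  return elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, 0x30);
}
#endif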
12929
12930
12931 /* Output mapping symbols for PLT entries associated with H. */
12932
12933 static bfd_boolean
12934 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12935 {
12936 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12937 struct elf32_arm_link_hash_table *htab;
12938 struct elf32_arm_link_hash_entry *eh;
12939 bfd_vma addr;
12940
12941 if (h->root.type == bfd_link_hash_indirect)
12942 return TRUE;
12943
12944 if (h->root.type == bfd_link_hash_warning)
12945 /* When warning symbols are created, they **replace** the "real"
12946 entry in the hash table, thus we never get to see the real
12947 symbol in a hash traversal. So look at it now. */
12948 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12949
12950 if (h->plt.offset == (bfd_vma) -1)
12951 return TRUE;
12952
12953 htab = elf32_arm_hash_table (osi->info);
12954 if (htab == NULL)
12955 return FALSE;
12956
12957 eh = (struct elf32_arm_link_hash_entry *) h;
12958 addr = h->plt.offset;
12959 if (htab->symbian_p)
12960 {
12961 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12962 return FALSE;
12963 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12964 return FALSE;
12965 }
12966 else if (htab->vxworks_p)
12967 {
12968 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12969 return FALSE;
12970 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12971 return FALSE;
12972 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12973 return FALSE;
12974 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12975 return FALSE;
12976 }
12977 else
12978 {
12979 bfd_signed_vma thumb_refs;
12980
12981 thumb_refs = eh->plt_thumb_refcount;
12982 if (!htab->use_blx)
12983 thumb_refs += eh->plt_maybe_thumb_refcount;
12984
12985 if (thumb_refs > 0)
12986 {
12987 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12988 return FALSE;
12989 }
12990 #ifdef FOUR_WORD_PLT
12991 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12992 return FALSE;
12993 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12994 return FALSE;
12995 #else
12996 /* A three-word PLT with no Thumb thunk contains only ARM code,
12997 so we only need to output a mapping symbol for the first PLT entry
12998 and for entries with Thumb thunks. */
12999 if (thumb_refs > 0 || addr == 20)
13000 {
13001 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
13002 return FALSE;
13003 }
13004 #endif
13005 }
13006
13007 return TRUE;
13008 }
13009
13010 /* Output a single local symbol for a generated stub. */
13011
13012 static bfd_boolean
13013 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
13014 bfd_vma offset, bfd_vma size)
13015 {
13016 Elf_Internal_Sym sym;
13017
13018 sym.st_value = osi->sec->output_section->vma
13019 + osi->sec->output_offset
13020 + offset;
13021 sym.st_size = size;
13022 sym.st_other = 0;
13023 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13024 sym.st_shndx = osi->sec_shndx;
13025 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
13026 }
13027
13028 static bfd_boolean
13029 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
13030 void * in_arg)
13031 {
13032 struct elf32_arm_stub_hash_entry *stub_entry;
13033 struct bfd_link_info *info;
13034 asection *stub_sec;
13035 bfd_vma addr;
13036 char *stub_name;
13037 output_arch_syminfo *osi;
13038 const insn_sequence *template_sequence;
13039 enum stub_insn_type prev_type;
13040 int size;
13041 int i;
13042 enum map_symbol_type sym_type;
13043
13044 /* Massage our args to the form they really have. */
13045 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13046 osi = (output_arch_syminfo *) in_arg;
13047
13048 info = osi->info;
13049
13050 stub_sec = stub_entry->stub_sec;
13051
13052 /* Ensure this stub is attached to the current section being
13053 processed. */
13054 if (stub_sec != osi->sec)
13055 return TRUE;
13056
13057 addr = (bfd_vma) stub_entry->stub_offset;
13058 stub_name = stub_entry->output_name;
13059
13060 template_sequence = stub_entry->stub_template;
13061 switch (template_sequence[0].type)
13062 {
13063 case ARM_TYPE:
13064 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
13065 return FALSE;
13066 break;
13067 case THUMB16_TYPE:
13068 case THUMB32_TYPE:
13069 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
13070 stub_entry->stub_size))
13071 return FALSE;
13072 break;
13073 default:
13074 BFD_FAIL ();
13075 return FALSE;
13076 }
13077
13078 prev_type = DATA_TYPE;
13079 size = 0;
13080 for (i = 0; i < stub_entry->stub_template_size; i++)
13081 {
13082 switch (template_sequence[i].type)
13083 {
13084 case ARM_TYPE:
13085 sym_type = ARM_MAP_ARM;
13086 break;
13087
13088 case THUMB16_TYPE:
13089 case THUMB32_TYPE:
13090 sym_type = ARM_MAP_THUMB;
13091 break;
13092
13093 case DATA_TYPE:
13094 sym_type = ARM_MAP_DATA;
13095 break;
13096
13097 default:
13098 BFD_FAIL ();
13099 return FALSE;
13100 }
13101
13102 if (template_sequence[i].type != prev_type)
13103 {
13104 prev_type = template_sequence[i].type;
13105 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
13106 return FALSE;
13107 }
13108
13109 switch (template_sequence[i].type)
13110 {
13111 case ARM_TYPE:
13112 case THUMB32_TYPE:
13113 size += 4;
13114 break;
13115
13116 case THUMB16_TYPE:
13117 size += 2;
13118 break;
13119
13120 case DATA_TYPE:
13121 size += 4;
13122 break;
13123
13124 default:
13125 BFD_FAIL ();
13126 return FALSE;
13127 }
13128 }
13129
13130 return TRUE;
13131 }
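
/* A simplified restatement of the mapping-symbol part of the traversal
   above.  TYPES is a bare array of instruction types standing in for the
   real stub template, and the function name is hypothetical.  */
#if 0 /* Illustrative only - not part of the build.  */
static bfd_boolean
example_map_stub_template (output_arch_syminfo *osi, bfd_vma addr,
                           const enum stub_insn_type *types, int count)
{
  enum stub_insn_type prev_type = DATA_TYPE;
  bfd_vma size = 0;
  int i;

  for (i = 0; i < count; i++)
    {
      /* A new mapping symbol is emitted whenever the instruction type
         changes, just as arm_map_one_stub does.  */
      if (types[i] != prev_type)
        {
          enum map_symbol_type sym_type
            = (types[i] == ARM_TYPE ? ARM_MAP_ARM
               : types[i] == DATA_TYPE ? ARM_MAP_DATA
               : ARM_MAP_THUMB);

          prev_type = types[i];
          if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
            return FALSE;
        }

      /* THUMB16 entries occupy two bytes; ARM, THUMB32 and DATA entries
         occupy four.  */
      size += (types[i] == THUMB16_TYPE ? 2 : 4);
    }

  return TRUE;
}
#endif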
13132
13133 /* Output mapping symbols for linker-generated sections,
13134 and for those data-only sections that do not have a
13135 $d mapping symbol. */
13136
13137 static bfd_boolean
13138 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13139 struct bfd_link_info *info,
13140 void *finfo,
13141 int (*func) (void *, const char *,
13142 Elf_Internal_Sym *,
13143 asection *,
13144 struct elf_link_hash_entry *))
13145 {
13146 output_arch_syminfo osi;
13147 struct elf32_arm_link_hash_table *htab;
13148 bfd_vma offset;
13149 bfd_size_type size;
13150 bfd *input_bfd;
13151
13152 htab = elf32_arm_hash_table (info);
13153 if (htab == NULL)
13154 return FALSE;
13155
13156 check_use_blx (htab);
13157
13158 osi.finfo = finfo;
13159 osi.info = info;
13160 osi.func = func;
13161
13162 /* Add a $d mapping symbol to data-only sections that
13163 don't have any mapping symbol. This may result in (harmless) redundant
13164 mapping symbols. */
13165 for (input_bfd = info->input_bfds;
13166 input_bfd != NULL;
13167 input_bfd = input_bfd->link_next)
13168 {
13169 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13170 for (osi.sec = input_bfd->sections;
13171 osi.sec != NULL;
13172 osi.sec = osi.sec->next)
13173 {
13174 if (osi.sec->output_section != NULL
13175 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
13176 != 0)
13177 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13178 == SEC_HAS_CONTENTS
13179 && get_arm_elf_section_data (osi.sec) != NULL
13180 && get_arm_elf_section_data (osi.sec)->mapcount == 0
13181 && osi.sec->size > 0)
13182 {
13183 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13184 (output_bfd, osi.sec->output_section);
13185 if (osi.sec_shndx != (int)SHN_BAD)
13186 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13187 }
13188 }
13189 }
13190
13191 /* ARM->Thumb glue. */
13192 if (htab->arm_glue_size > 0)
13193 {
13194 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13195 ARM2THUMB_GLUE_SECTION_NAME);
13196
13197 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13198 (output_bfd, osi.sec->output_section);
13199 if (info->shared || htab->root.is_relocatable_executable
13200 || htab->pic_veneer)
13201 size = ARM2THUMB_PIC_GLUE_SIZE;
13202 else if (htab->use_blx)
13203 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13204 else
13205 size = ARM2THUMB_STATIC_GLUE_SIZE;
13206
13207 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13208 {
13209 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13210 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13211 }
13212 }
13213
13214 /* Thumb->ARM glue. */
13215 if (htab->thumb_glue_size > 0)
13216 {
13217 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13218 THUMB2ARM_GLUE_SECTION_NAME);
13219
13220 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13221 (output_bfd, osi.sec->output_section);
13222 size = THUMB2ARM_GLUE_SIZE;
13223
13224 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13225 {
13226 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13227 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13228 }
13229 }
13230
13231 /* ARMv4 BX veneers. */
13232 if (htab->bx_glue_size > 0)
13233 {
13234 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13235 ARM_BX_GLUE_SECTION_NAME);
13236
13237 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13238 (output_bfd, osi.sec->output_section);
13239
13240 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13241 }
13242
13243 /* Long call stubs. */
13244 if (htab->stub_bfd && htab->stub_bfd->sections)
13245 {
13246 asection* stub_sec;
13247
13248 for (stub_sec = htab->stub_bfd->sections;
13249 stub_sec != NULL;
13250 stub_sec = stub_sec->next)
13251 {
13252 /* Ignore non-stub sections. */
13253 if (!strstr (stub_sec->name, STUB_SUFFIX))
13254 continue;
13255
13256 osi.sec = stub_sec;
13257
13258 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13259 (output_bfd, osi.sec->output_section);
13260
13261 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13262 }
13263 }
13264
13265 /* Finally, output mapping symbols for the PLT. */
13266 if (!htab->splt || htab->splt->size == 0)
13267 return TRUE;
13268
13269 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13270 htab->splt->output_section);
13271 osi.sec = htab->splt;
13272 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13273 PLT header. */
13274 if (htab->vxworks_p)
13275 {
13276 /* VxWorks shared libraries have no PLT header. */
13277 if (!info->shared)
13278 {
13279 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13280 return FALSE;
13281 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13282 return FALSE;
13283 }
13284 }
13285 else if (!htab->symbian_p)
13286 {
13287 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13288 return FALSE;
13289 #ifndef FOUR_WORD_PLT
13290 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13291 return FALSE;
13292 #endif
13293 }
13294
13295 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13296 return TRUE;
13297 }
13298
13299 /* Allocate target specific section data. */
13300
13301 static bfd_boolean
13302 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13303 {
13304 if (!sec->used_by_bfd)
13305 {
13306 _arm_elf_section_data *sdata;
13307 bfd_size_type amt = sizeof (*sdata);
13308
13309 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13310 if (sdata == NULL)
13311 return FALSE;
13312 sec->used_by_bfd = sdata;
13313 }
13314
13315 record_section_with_arm_elf_section_data (sec);
13316
13317 return _bfd_elf_new_section_hook (abfd, sec);
13318 }
13319
13320
13321 /* Used to order a list of mapping symbols by address. */
13322
13323 static int
13324 elf32_arm_compare_mapping (const void * a, const void * b)
13325 {
13326 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13327 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13328
13329 if (amap->vma > bmap->vma)
13330 return 1;
13331 else if (amap->vma < bmap->vma)
13332 return -1;
13333 else if (amap->type > bmap->type)
13334 /* Ensure results do not depend on the host qsort for objects with
13335 multiple mapping symbols at the same address by sorting on type
13336 after vma. */
13337 return 1;
13338 else if (amap->type < bmap->type)
13339 return -1;
13340 else
13341 return 0;
13342 }
13343
13344 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13345
13346 static unsigned long
13347 offset_prel31 (unsigned long addr, bfd_vma offset)
13348 {
13349 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13350 }
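
/* A couple of worked examples for the helper above; the values are
   invented and the checks are only meant to be read, not built.  */
#if 0 /* Illustrative only - not part of the build.  */
static void
example_offset_prel31 (void)
{
  /* Bit 31 is preserved; only the low 31 bits are adjusted.  */
  BFD_ASSERT (offset_prel31 (0x00000004ul, 8) == 0x0000000cul);
  BFD_ASSERT (offset_prel31 (0x80000004ul, 8) == 0x8000000cul);
}
#endif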
13351
13352 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13353 relocations. */
13354
13355 static void
13356 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13357 {
13358 unsigned long first_word = bfd_get_32 (output_bfd, from);
13359 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13360
13361 /* High bit of first word is supposed to be zero. */
13362 if ((first_word & 0x80000000ul) == 0)
13363 first_word = offset_prel31 (first_word, offset);
13364
13365 /* If the high bit of the second word is clear, and the word is not 0x1
13366 (EXIDX_CANTUNWIND), this is a prel31 offset to an .ARM.extab entry. */
13367 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13368 second_word = offset_prel31 (second_word, offset);
13369
13370 bfd_put_32 (output_bfd, first_word, to);
13371 bfd_put_32 (output_bfd, second_word, to + 4);
13372 }
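
/* The second word of an .ARM.exidx entry can hold three different kinds
   of value, and only the last one is a prel31 reference that needs
   re-biasing when entries move; this is why copy_exidx_entry checks both
   conditions above.  A hypothetical classifier, for illustration:  */
#if 0 /* Illustrative only - not part of the build.  */
static const char *
example_classify_exidx_word2 (unsigned long second_word)
{
  if (second_word == 0x1)
    return "EXIDX_CANTUNWIND";
  else if ((second_word & 0x80000000ul) != 0)
    return "inline (compact) unwind data";
  else
    return "prel31 offset to an .ARM.extab entry";
}
#endif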
13373
13374 /* Data for make_branch_to_a8_stub(). */
13375
13376 struct a8_branch_to_stub_data {
13377 asection *writing_section;
13378 bfd_byte *contents;
13379 };
13380
13381
13382 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13383 places for a particular section. */
13384
13385 static bfd_boolean
13386 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13387 void *in_arg)
13388 {
13389 struct elf32_arm_stub_hash_entry *stub_entry;
13390 struct a8_branch_to_stub_data *data;
13391 bfd_byte *contents;
13392 unsigned long branch_insn;
13393 bfd_vma veneered_insn_loc, veneer_entry_loc;
13394 bfd_signed_vma branch_offset;
13395 bfd *abfd;
13396 unsigned int target;
13397
13398 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13399 data = (struct a8_branch_to_stub_data *) in_arg;
13400
13401 if (stub_entry->target_section != data->writing_section
13402 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13403 return TRUE;
13404
13405 contents = data->contents;
13406
13407 veneered_insn_loc = stub_entry->target_section->output_section->vma
13408 + stub_entry->target_section->output_offset
13409 + stub_entry->target_value;
13410
13411 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13412 + stub_entry->stub_sec->output_offset
13413 + stub_entry->stub_offset;
13414
13415 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13416 veneered_insn_loc &= ~3u;
13417
13418 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13419
13420 abfd = stub_entry->target_section->owner;
13421 target = stub_entry->target_value;
13422
13423 /* We attempt to avoid this condition by setting stubs_always_after_branch
13424 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13425 This check is just to be on the safe side... */
13426 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13427 {
13428 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13429 "allocated in unsafe location"), abfd);
13430 return FALSE;
13431 }
13432
13433 switch (stub_entry->stub_type)
13434 {
13435 case arm_stub_a8_veneer_b:
13436 case arm_stub_a8_veneer_b_cond:
13437 branch_insn = 0xf0009000;
13438 goto jump24;
13439
13440 case arm_stub_a8_veneer_blx:
13441 branch_insn = 0xf000e800;
13442 goto jump24;
13443
13444 case arm_stub_a8_veneer_bl:
13445 {
13446 unsigned int i1, j1, i2, j2, s;
13447
13448 branch_insn = 0xf000d000;
13449
13450 jump24:
13451 if (branch_offset < -16777216 || branch_offset > 16777214)
13452 {
13453 /* There's not much we can do apart from complain if this
13454 happens. */
13455 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13456 "of range (input file too large)"), abfd);
13457 return FALSE;
13458 }
13459
13460 /* i1 = not(j1 eor s), so:
13461 not i1 = j1 eor s
13462 j1 = (not i1) eor s. */
13463
13464 branch_insn |= (branch_offset >> 1) & 0x7ff;
13465 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13466 i2 = (branch_offset >> 22) & 1;
13467 i1 = (branch_offset >> 23) & 1;
13468 s = (branch_offset >> 24) & 1;
13469 j1 = (!i1) ^ s;
13470 j2 = (!i2) ^ s;
13471 branch_insn |= j2 << 11;
13472 branch_insn |= j1 << 13;
13473 branch_insn |= s << 26;
13474 }
13475 break;
13476
13477 default:
13478 BFD_FAIL ();
13479 return FALSE;
13480 }
13481
13482 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13483 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13484
13485 return TRUE;
13486 }
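
/* A restatement of the jump24 bit packing above as a standalone sketch.
   BASE_INSN would be 0xf0009000 for B.W, 0xf000d000 for BL or 0xf000e800
   for BLX, and OFFSET is the byte offset from PC + 4 to the target.  */
#if 0 /* Illustrative only - not part of the build.  */
static unsigned long
example_pack_thumb2_branch (unsigned long base_insn, bfd_signed_vma offset)
{
  unsigned int i1, i2, j1, j2, s;
  unsigned long insn = base_insn;

  insn |= (offset >> 1) & 0x7ff;           /* imm11, low halfword.  */
  insn |= ((offset >> 12) & 0x3ff) << 16;  /* imm10, high halfword.  */
  i2 = (offset >> 22) & 1;
  i1 = (offset >> 23) & 1;
  s  = (offset >> 24) & 1;
  j1 = (!i1) ^ s;
  j2 = (!i2) ^ s;
  insn |= j2 << 11;
  insn |= j1 << 13;
  insn |= s << 26;

  /* For base_insn 0xf0009000 and offset 0x100 this yields 0xf000b880,
     i.e. the halfwords f000 b880: a B.W forward by 0x100 bytes.  */
  return insn;
}
#endif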
13487
13488 /* Do code byteswapping, Cortex-A8 erratum fix-ups and .ARM.exidx table
13489 editing. Return FALSE afterwards so that the section is written out as
normal; edited .ARM.exidx sections are written here and return TRUE. */
13490
13491 static bfd_boolean
13492 elf32_arm_write_section (bfd *output_bfd,
13493 struct bfd_link_info *link_info,
13494 asection *sec,
13495 bfd_byte *contents)
13496 {
13497 unsigned int mapcount, errcount;
13498 _arm_elf_section_data *arm_data;
13499 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13500 elf32_arm_section_map *map;
13501 elf32_vfp11_erratum_list *errnode;
13502 bfd_vma ptr;
13503 bfd_vma end;
13504 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13505 bfd_byte tmp;
13506 unsigned int i;
13507
13508 if (globals == NULL)
13509 return FALSE;
13510
13511 /* If this section has not been allocated an _arm_elf_section_data
13512 structure then we cannot record anything. */
13513 arm_data = get_arm_elf_section_data (sec);
13514 if (arm_data == NULL)
13515 return FALSE;
13516
13517 mapcount = arm_data->mapcount;
13518 map = arm_data->map;
13519 errcount = arm_data->erratumcount;
13520
13521 if (errcount != 0)
13522 {
13523 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13524
13525 for (errnode = arm_data->erratumlist; errnode != 0;
13526 errnode = errnode->next)
13527 {
13528 bfd_vma target = errnode->vma - offset;
13529
13530 switch (errnode->type)
13531 {
13532 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13533 {
13534 bfd_vma branch_to_veneer;
13535 /* Original condition code of instruction, plus bit mask for
13536 ARM B instruction. */
13537 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13538 | 0x0a000000;
13539
13540 /* The instruction is before the label. */
13541 target -= 4;
13542
13543 /* Above offset included in -4 below. */
13544 branch_to_veneer = errnode->u.b.veneer->vma
13545 - errnode->vma - 4;
13546
13547 if ((signed) branch_to_veneer < -(1 << 25)
13548 || (signed) branch_to_veneer >= (1 << 25))
13549 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13550 "range"), output_bfd);
13551
13552 insn |= (branch_to_veneer >> 2) & 0xffffff;
13553 contents[endianflip ^ target] = insn & 0xff;
13554 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13555 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13556 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13557 }
13558 break;
13559
13560 case VFP11_ERRATUM_ARM_VENEER:
13561 {
13562 bfd_vma branch_from_veneer;
13563 unsigned int insn;
13564
13565 /* Take size of veneer into account. */
13566 branch_from_veneer = errnode->u.v.branch->vma
13567 - errnode->vma - 12;
13568
13569 if ((signed) branch_from_veneer < -(1 << 25)
13570 || (signed) branch_from_veneer >= (1 << 25))
13571 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13572 "range"), output_bfd);
13573
13574 /* Original instruction. */
13575 insn = errnode->u.v.branch->u.b.vfp_insn;
13576 contents[endianflip ^ target] = insn & 0xff;
13577 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13578 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13579 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13580
13581 /* Branch back to insn after original insn. */
13582 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13583 contents[endianflip ^ (target + 4)] = insn & 0xff;
13584 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13585 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13586 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13587 }
13588 break;
13589
13590 default:
13591 abort ();
13592 }
13593 }
13594 }
13595
13596 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13597 {
13598 arm_unwind_table_edit *edit_node
13599 = arm_data->u.exidx.unwind_edit_list;
13600 /* Now, sec->size is the size of the section we will write. The original
13601 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13602 markers) was sec->rawsize. (This isn't the case if we performed no
13603 edits; then rawsize will be zero and we should use size.) */
13604 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13605 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13606 unsigned int in_index, out_index;
13607 bfd_vma add_to_offsets = 0;
13608
13609 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13610 {
13611 if (edit_node)
13612 {
13613 unsigned int edit_index = edit_node->index;
13614
13615 if (in_index < edit_index && in_index * 8 < input_size)
13616 {
13617 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13618 contents + in_index * 8, add_to_offsets);
13619 out_index++;
13620 in_index++;
13621 }
13622 else if (in_index == edit_index
13623 || (in_index * 8 >= input_size
13624 && edit_index == UINT_MAX))
13625 {
13626 switch (edit_node->type)
13627 {
13628 case DELETE_EXIDX_ENTRY:
13629 in_index++;
13630 add_to_offsets += 8;
13631 break;
13632
13633 case INSERT_EXIDX_CANTUNWIND_AT_END:
13634 {
13635 asection *text_sec = edit_node->linked_section;
13636 bfd_vma text_offset = text_sec->output_section->vma
13637 + text_sec->output_offset
13638 + text_sec->size;
13639 bfd_vma exidx_offset = offset + out_index * 8;
13640 unsigned long prel31_offset;
13641
13642 /* Note: this is meant to be equivalent to an
13643 R_ARM_PREL31 relocation. These synthetic
13644 EXIDX_CANTUNWIND markers are not relocated by the
13645 usual BFD method. */
13646 prel31_offset = (text_offset - exidx_offset)
13647 & 0x7ffffffful;
13648
13649 /* First address we can't unwind. */
13650 bfd_put_32 (output_bfd, prel31_offset,
13651 &edited_contents[out_index * 8]);
13652
13653 /* Code for EXIDX_CANTUNWIND. */
13654 bfd_put_32 (output_bfd, 0x1,
13655 &edited_contents[out_index * 8 + 4]);
13656
13657 out_index++;
13658 add_to_offsets -= 8;
13659 }
13660 break;
13661 }
13662
13663 edit_node = edit_node->next;
13664 }
13665 }
13666 else
13667 {
13668 /* No more edits, copy remaining entries verbatim. */
13669 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13670 contents + in_index * 8, add_to_offsets);
13671 out_index++;
13672 in_index++;
13673 }
13674 }
13675
13676 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13677 bfd_set_section_contents (output_bfd, sec->output_section,
13678 edited_contents,
13679 (file_ptr) sec->output_offset, sec->size);
13680
13681 return TRUE;
13682 }
13683
13684 /* Fix code to point to Cortex-A8 erratum stubs. */
13685 if (globals->fix_cortex_a8)
13686 {
13687 struct a8_branch_to_stub_data data;
13688
13689 data.writing_section = sec;
13690 data.contents = contents;
13691
13692 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13693 &data);
13694 }
13695
13696 if (mapcount == 0)
13697 return FALSE;
13698
13699 if (globals->byteswap_code)
13700 {
13701 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13702
13703 ptr = map[0].vma;
13704 for (i = 0; i < mapcount; i++)
13705 {
13706 if (i == mapcount - 1)
13707 end = sec->size;
13708 else
13709 end = map[i + 1].vma;
13710
13711 switch (map[i].type)
13712 {
13713 case 'a':
13714 /* Byte swap code words. */
13715 while (ptr + 3 < end)
13716 {
13717 tmp = contents[ptr];
13718 contents[ptr] = contents[ptr + 3];
13719 contents[ptr + 3] = tmp;
13720 tmp = contents[ptr + 1];
13721 contents[ptr + 1] = contents[ptr + 2];
13722 contents[ptr + 2] = tmp;
13723 ptr += 4;
13724 }
13725 break;
13726
13727 case 't':
13728 /* Byte swap code halfwords. */
13729 while (ptr + 1 < end)
13730 {
13731 tmp = contents[ptr];
13732 contents[ptr] = contents[ptr + 1];
13733 contents[ptr + 1] = tmp;
13734 ptr += 2;
13735 }
13736 break;
13737
13738 case 'd':
13739 /* Leave data alone. */
13740 break;
13741 }
13742 ptr = end;
13743 }
13744 }
13745
13746 free (map);
13747 arm_data->mapcount = 0;
13748 arm_data->mapsize = 0;
13749 arm_data->map = NULL;
13750 unrecord_section_with_arm_elf_section_data (sec);
13751
13752 return FALSE;
13753 }
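
/* The byteswap loop above, restated for a single item: within a '$a'
   region each 4-byte word AA BB CC DD becomes DD CC BB AA, within a '$t'
   region each 2-byte halfword AA BB becomes BB AA, and '$d' (data) bytes
   are never touched.  The helper below is a hypothetical illustration of
   that rule.  */
#if 0 /* Illustrative only - not part of the build.  */
static void
example_swap_code_item (bfd_byte *p, char map_type)
{
  bfd_byte tmp;

  if (map_type == 'a')          /* ARM: reverse a 32-bit word.  */
    {
      tmp = p[0]; p[0] = p[3]; p[3] = tmp;
      tmp = p[1]; p[1] = p[2]; p[2] = tmp;
    }
  else if (map_type == 't')     /* Thumb: reverse a 16-bit halfword.  */
    {
      tmp = p[0]; p[0] = p[1]; p[1] = tmp;
    }
  /* 'd': leave data alone.  */
}
#endif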
13754
13755 static void
13756 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13757 asection * sec,
13758 void * ignore ATTRIBUTE_UNUSED)
13759 {
13760 unrecord_section_with_arm_elf_section_data (sec);
13761 }
13762
13763 static bfd_boolean
13764 elf32_arm_close_and_cleanup (bfd * abfd)
13765 {
13766 if (abfd->sections)
13767 bfd_map_over_sections (abfd,
13768 unrecord_section_via_map_over_sections,
13769 NULL);
13770
13771 return _bfd_elf_close_and_cleanup (abfd);
13772 }
13773
13774 static bfd_boolean
13775 elf32_arm_bfd_free_cached_info (bfd * abfd)
13776 {
13777 if (abfd->sections)
13778 bfd_map_over_sections (abfd,
13779 unrecord_section_via_map_over_sections,
13780 NULL);
13781
13782 return _bfd_free_cached_info (abfd);
13783 }
13784
13785 /* Display STT_ARM_TFUNC symbols as functions. */
13786
13787 static void
13788 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13789 asymbol *asym)
13790 {
13791 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13792
13793 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13794 elfsym->symbol.flags |= BSF_FUNCTION;
13795 }
13796
13797
13798 /* Mangle thumb function symbols as we read them in. */
13799
13800 static bfd_boolean
13801 elf32_arm_swap_symbol_in (bfd * abfd,
13802 const void *psrc,
13803 const void *pshn,
13804 Elf_Internal_Sym *dst)
13805 {
13806 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13807 return FALSE;
13808
13809 /* New EABI objects mark thumb function symbols by setting the low bit of
13810 the address. Turn these into STT_ARM_TFUNC. */
13811 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13812 && (dst->st_value & 1))
13813 {
13814 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13815 dst->st_value &= ~(bfd_vma) 1;
13816 }
13817 return TRUE;
13818 }
13819
13820
13821 /* Mangle thumb function symbols as we write them out. */
13822
13823 static void
13824 elf32_arm_swap_symbol_out (bfd *abfd,
13825 const Elf_Internal_Sym *src,
13826 void *cdst,
13827 void *shndx)
13828 {
13829 Elf_Internal_Sym newsym;
13830
13831 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13832 of the address set, as per the new EABI. We do this unconditionally
13833 because objcopy does not set the elf header flags until after
13834 it writes out the symbol table. */
13835 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13836 {
13837 newsym = *src;
13838 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13839 if (newsym.st_shndx != SHN_UNDEF)
13840 {
13841 /* Do this only for defined symbols. At link time, the static
13842 linker simulates the dynamic linker's work of resolving
13843 symbols and carries over the Thumb-ness of resolved symbols to
13844 the output symbol table. It is not clear exactly how this
13845 happens, but the Thumb-ness of undefined symbols can well be
13846 different at runtime, and writing '1' for them would be
13847 confusing for users and possibly for the dynamic linker
13848 itself. */
13849 newsym.st_value |= 1;
13850 }
13851
13852 src = &newsym;
13853 }
13854 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13855 }
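
/* Example of the round trip performed by the two routines above (the
   address is invented): an EABI object records a defined Thumb function
   at 0x8000 as an STT_FUNC symbol with st_value 0x8001.
   elf32_arm_swap_symbol_in converts it to STT_ARM_TFUNC with st_value
   0x8000 for internal use, and elf32_arm_swap_symbol_out writes it back
   out as STT_FUNC with the low bit set again (for defined symbols
   only).  */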
13856
13857 /* Add the PT_ARM_EXIDX program header. */
13858
13859 static bfd_boolean
13860 elf32_arm_modify_segment_map (bfd *abfd,
13861 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13862 {
13863 struct elf_segment_map *m;
13864 asection *sec;
13865
13866 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13867 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13868 {
13869 /* If there is already a PT_ARM_EXIDX header, then we do not
13870 want to add another one. This situation arises when running
13871 "strip"; the input binary already has the header. */
13872 m = elf_tdata (abfd)->segment_map;
13873 while (m && m->p_type != PT_ARM_EXIDX)
13874 m = m->next;
13875 if (!m)
13876 {
13877 m = (struct elf_segment_map *)
13878 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13879 if (m == NULL)
13880 return FALSE;
13881 m->p_type = PT_ARM_EXIDX;
13882 m->count = 1;
13883 m->sections[0] = sec;
13884
13885 m->next = elf_tdata (abfd)->segment_map;
13886 elf_tdata (abfd)->segment_map = m;
13887 }
13888 }
13889
13890 return TRUE;
13891 }
13892
13893 /* We may add a PT_ARM_EXIDX program header. */
13894
13895 static int
13896 elf32_arm_additional_program_headers (bfd *abfd,
13897 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13898 {
13899 asection *sec;
13900
13901 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13902 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13903 return 1;
13904 else
13905 return 0;
13906 }
13907
13908 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13909
13910 static bfd_boolean
13911 elf32_arm_is_function_type (unsigned int type)
13912 {
13913 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13914 }
13915
13916 /* We use this to override swap_symbol_in and swap_symbol_out. */
13917 const struct elf_size_info elf32_arm_size_info =
13918 {
13919 sizeof (Elf32_External_Ehdr),
13920 sizeof (Elf32_External_Phdr),
13921 sizeof (Elf32_External_Shdr),
13922 sizeof (Elf32_External_Rel),
13923 sizeof (Elf32_External_Rela),
13924 sizeof (Elf32_External_Sym),
13925 sizeof (Elf32_External_Dyn),
13926 sizeof (Elf_External_Note),
13927 4,
13928 1,
13929 32, 2,
13930 ELFCLASS32, EV_CURRENT,
13931 bfd_elf32_write_out_phdrs,
13932 bfd_elf32_write_shdrs_and_ehdr,
13933 bfd_elf32_checksum_contents,
13934 bfd_elf32_write_relocs,
13935 elf32_arm_swap_symbol_in,
13936 elf32_arm_swap_symbol_out,
13937 bfd_elf32_slurp_reloc_table,
13938 bfd_elf32_slurp_symbol_table,
13939 bfd_elf32_swap_dyn_in,
13940 bfd_elf32_swap_dyn_out,
13941 bfd_elf32_swap_reloc_in,
13942 bfd_elf32_swap_reloc_out,
13943 bfd_elf32_swap_reloca_in,
13944 bfd_elf32_swap_reloca_out
13945 };
13946
13947 #define ELF_ARCH bfd_arch_arm
13948 #define ELF_MACHINE_CODE EM_ARM
13949 #ifdef __QNXTARGET__
13950 #define ELF_MAXPAGESIZE 0x1000
13951 #else
13952 #define ELF_MAXPAGESIZE 0x8000
13953 #endif
13954 #define ELF_MINPAGESIZE 0x1000
13955 #define ELF_COMMONPAGESIZE 0x1000
13956
13957 #define bfd_elf32_mkobject elf32_arm_mkobject
13958
13959 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13960 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13961 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13962 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13963 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13964 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13965 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13966 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13967 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13968 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13969 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13970 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13971 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13972 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13973 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13974
13975 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13976 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13977 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13978 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13979 #define elf_backend_check_relocs elf32_arm_check_relocs
13980 #define elf_backend_relocate_section elf32_arm_relocate_section
13981 #define elf_backend_write_section elf32_arm_write_section
13982 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13983 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13984 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13985 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13986 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13987 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13988 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13989 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13990 #define elf_backend_object_p elf32_arm_object_p
13991 #define elf_backend_section_flags elf32_arm_section_flags
13992 #define elf_backend_fake_sections elf32_arm_fake_sections
13993 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13994 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13995 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13996 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13997 #define elf_backend_size_info elf32_arm_size_info
13998 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13999 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
14000 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
14001 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
14002 #define elf_backend_is_function_type elf32_arm_is_function_type
14003
14004 #define elf_backend_can_refcount 1
14005 #define elf_backend_can_gc_sections 1
14006 #define elf_backend_plt_readonly 1
14007 #define elf_backend_want_got_plt 1
14008 #define elf_backend_want_plt_sym 0
14009 #define elf_backend_may_use_rel_p 1
14010 #define elf_backend_may_use_rela_p 0
14011 #define elf_backend_default_use_rela_p 0
14012
14013 #define elf_backend_got_header_size 12
14014
14015 #undef elf_backend_obj_attrs_vendor
14016 #define elf_backend_obj_attrs_vendor "aeabi"
14017 #undef elf_backend_obj_attrs_section
14018 #define elf_backend_obj_attrs_section ".ARM.attributes"
14019 #undef elf_backend_obj_attrs_arg_type
14020 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
14021 #undef elf_backend_obj_attrs_section_type
14022 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
14023 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
14024
14025 #include "elf32-target.h"
14026
14027 /* VxWorks Targets. */
14028
14029 #undef TARGET_LITTLE_SYM
14030 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
14031 #undef TARGET_LITTLE_NAME
14032 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
14033 #undef TARGET_BIG_SYM
14034 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
14035 #undef TARGET_BIG_NAME
14036 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
14037
14038 /* Like elf32_arm_link_hash_table_create -- but overrides
14039 appropriately for VxWorks. */
14040
14041 static struct bfd_link_hash_table *
14042 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
14043 {
14044 struct bfd_link_hash_table *ret;
14045
14046 ret = elf32_arm_link_hash_table_create (abfd);
14047 if (ret)
14048 {
14049 struct elf32_arm_link_hash_table *htab
14050 = (struct elf32_arm_link_hash_table *) ret;
14051 htab->use_rel = 0;
14052 htab->vxworks_p = 1;
14053 }
14054 return ret;
14055 }
14056
14057 static void
14058 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
14059 {
14060 elf32_arm_final_write_processing (abfd, linker);
14061 elf_vxworks_final_write_processing (abfd, linker);
14062 }
14063
14064 #undef elf32_bed
14065 #define elf32_bed elf32_arm_vxworks_bed
14066
14067 #undef bfd_elf32_bfd_link_hash_table_create
14068 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
14069 #undef elf_backend_add_symbol_hook
14070 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
14071 #undef elf_backend_final_write_processing
14072 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
14073 #undef elf_backend_emit_relocs
14074 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
14075
14076 #undef elf_backend_may_use_rel_p
14077 #define elf_backend_may_use_rel_p 0
14078 #undef elf_backend_may_use_rela_p
14079 #define elf_backend_may_use_rela_p 1
14080 #undef elf_backend_default_use_rela_p
14081 #define elf_backend_default_use_rela_p 1
14082 #undef elf_backend_want_plt_sym
14083 #define elf_backend_want_plt_sym 1
14084 #undef ELF_MAXPAGESIZE
14085 #define ELF_MAXPAGESIZE 0x1000
14086
14087 #include "elf32-target.h"
14088
14089
14090 /* Merge backend specific data from an object file to the output
14091 object file when linking. */
14092
14093 static bfd_boolean
14094 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
14095 {
14096 flagword out_flags;
14097 flagword in_flags;
14098 bfd_boolean flags_compatible = TRUE;
14099 asection *sec;
14100
14101 /* Check if we have the same endianness. */
14102 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
14103 return FALSE;
14104
14105 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
14106 return TRUE;
14107
14108 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
14109 return FALSE;
14110
14111 /* The input BFD must have had its flags initialised. */
14112 /* The following seems bogus to me -- The flags are initialized in
14113 the assembler but I don't think an elf_flags_init field is
14114 written into the object. */
14115 /* BFD_ASSERT (elf_flags_init (ibfd)); */
14116
14117 in_flags = elf_elfheader (ibfd)->e_flags;
14118 out_flags = elf_elfheader (obfd)->e_flags;
14119
14120 /* In theory there is no reason why we couldn't handle this. However
14121 in practice it isn't even close to working and there is no real
14122 reason to want it. */
14123 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
14124 && !(ibfd->flags & DYNAMIC)
14125 && (in_flags & EF_ARM_BE8))
14126 {
14127 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14128 ibfd);
14129 return FALSE;
14130 }
14131
14132 if (!elf_flags_init (obfd))
14133 {
14134 /* If the input is the default architecture and had the default
14135 flags then do not bother setting the flags for the output
14136 architecture; instead allow future merges to do this. If no
14137 future merges ever set these flags then they will retain their
14138 uninitialised values, which, surprise surprise, correspond
14139 to the default values. */
14140 if (bfd_get_arch_info (ibfd)->the_default
14141 && elf_elfheader (ibfd)->e_flags == 0)
14142 return TRUE;
14143
14144 elf_flags_init (obfd) = TRUE;
14145 elf_elfheader (obfd)->e_flags = in_flags;
14146
14147 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14148 && bfd_get_arch_info (obfd)->the_default)
14149 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14150
14151 return TRUE;
14152 }
14153
14154 /* Determine what should happen if the input ARM architecture
14155 does not match the output ARM architecture. */
14156 if (! bfd_arm_merge_machines (ibfd, obfd))
14157 return FALSE;
14158
14159 /* Identical flags must be compatible. */
14160 if (in_flags == out_flags)
14161 return TRUE;
14162
14163 /* Check to see if the input BFD actually contains any sections. If
14164 not, its flags may not have been initialised either, but it
14165 cannot actually cause any incompatibility. Do not short-circuit
14166 dynamic objects; their section list may be emptied by
14167 elf_link_add_object_symbols.
14168
14169 Also check to see if there are no code sections in the input.
14170 In this case there is no need to check for code specific flags.
14171 XXX - do we need to worry about floating-point format compatibility
14172 in data sections? */
14173 if (!(ibfd->flags & DYNAMIC))
14174 {
14175 bfd_boolean null_input_bfd = TRUE;
14176 bfd_boolean only_data_sections = TRUE;
14177
14178 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14179 {
14180 /* Ignore synthetic glue sections. */
14181 if (strcmp (sec->name, ".glue_7")
14182 && strcmp (sec->name, ".glue_7t"))
14183 {
14184 if ((bfd_get_section_flags (ibfd, sec)
14185 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14186 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14187 only_data_sections = FALSE;
14188
14189 null_input_bfd = FALSE;
14190 break;
14191 }
14192 }
14193
14194 if (null_input_bfd || only_data_sections)
14195 return TRUE;
14196 }
14197
14198 /* Complain about various flag mismatches. */
14199 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14200 EF_ARM_EABI_VERSION (out_flags)))
14201 {
14202 _bfd_error_handler
14203 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14204 ibfd, obfd,
14205 (in_flags & EF_ARM_EABIMASK) >> 24,
14206 (out_flags & EF_ARM_EABIMASK) >> 24);
14207 return FALSE;
14208 }
14209
14210 /* Not sure what needs to be checked for EABI versions >= 1. */
14211 /* VxWorks libraries do not use these flags. */
14212 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14213 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14214 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14215 {
14216 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14217 {
14218 _bfd_error_handler
14219 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14220 ibfd, obfd,
14221 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14222 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14223 flags_compatible = FALSE;
14224 }
14225
14226 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14227 {
14228 if (in_flags & EF_ARM_APCS_FLOAT)
14229 _bfd_error_handler
14230 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14231 ibfd, obfd);
14232 else
14233 _bfd_error_handler
14234 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14235 ibfd, obfd);
14236
14237 flags_compatible = FALSE;
14238 }
14239
14240 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14241 {
14242 if (in_flags & EF_ARM_VFP_FLOAT)
14243 _bfd_error_handler
14244 (_("error: %B uses VFP instructions, whereas %B does not"),
14245 ibfd, obfd);
14246 else
14247 _bfd_error_handler
14248 (_("error: %B uses FPA instructions, whereas %B does not"),
14249 ibfd, obfd);
14250
14251 flags_compatible = FALSE;
14252 }
14253
14254 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14255 {
14256 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14257 _bfd_error_handler
14258 (_("error: %B uses Maverick instructions, whereas %B does not"),
14259 ibfd, obfd);
14260 else
14261 _bfd_error_handler
14262 (_("error: %B does not use Maverick instructions, whereas %B does"),
14263 ibfd, obfd);
14264
14265 flags_compatible = FALSE;
14266 }
14267
14268 #ifdef EF_ARM_SOFT_FLOAT
14269 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14270 {
14271 /* We can allow interworking between code that is VFP format
14272 layout, and uses either soft float or integer regs for
14273 passing floating point arguments and results. We already
14274 know that the APCS_FLOAT flags match; similarly for VFP
14275 flags. */
14276 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14277 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14278 {
14279 if (in_flags & EF_ARM_SOFT_FLOAT)
14280 _bfd_error_handler
14281 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14282 ibfd, obfd);
14283 else
14284 _bfd_error_handler
14285 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14286 ibfd, obfd);
14287
14288 flags_compatible = FALSE;
14289 }
14290 }
14291 #endif
14292
14293 /* Interworking mismatch is only a warning. */
14294 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14295 {
14296 if (in_flags & EF_ARM_INTERWORK)
14297 {
14298 _bfd_error_handler
14299 (_("Warning: %B supports interworking, whereas %B does not"),
14300 ibfd, obfd);
14301 }
14302 else
14303 {
14304 _bfd_error_handler
14305 (_("Warning: %B does not support interworking, whereas %B does"),
14306 ibfd, obfd);
14307 }
14308 }
14309 }
14310
14311 return flags_compatible;
14312 }
14313
14314
14315 /* Symbian OS Targets. */
14316
14317 #undef TARGET_LITTLE_SYM
14318 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14319 #undef TARGET_LITTLE_NAME
14320 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14321 #undef TARGET_BIG_SYM
14322 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14323 #undef TARGET_BIG_NAME
14324 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14325
14326 /* Like elf32_arm_link_hash_table_create -- but overrides
14327 appropriately for Symbian OS. */
14328
14329 static struct bfd_link_hash_table *
14330 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14331 {
14332 struct bfd_link_hash_table *ret;
14333
14334 ret = elf32_arm_link_hash_table_create (abfd);
14335 if (ret)
14336 {
14337 struct elf32_arm_link_hash_table *htab
14338 = (struct elf32_arm_link_hash_table *)ret;
14339 /* There is no PLT header for Symbian OS. */
14340 htab->plt_header_size = 0;
14341 /* The PLT entries are each one instruction and one word. */
14342 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14343 htab->symbian_p = 1;
14344 /* Symbian uses armv5t or above, so use_blx is always true. */
14345 htab->use_blx = 1;
14346 htab->root.is_relocatable_executable = 1;
14347 }
14348 return ret;
14349 }
14350
14351 static const struct bfd_elf_special_section
14352 elf32_arm_symbian_special_sections[] =
14353 {
14354 /* In a BPABI executable, the dynamic linking sections do not go in
14355 the loadable read-only segment. The post-linker may wish to
14356 refer to these sections, but they are not part of the final
14357 program image. */
14358 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14359 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14360 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14361 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14362 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14363 /* These sections do not need to be writable as the SymbianOS
14364 postlinker will arrange things so that no dynamic relocation is
14365 required. */
14366 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14367 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14368 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14369 { NULL, 0, 0, 0, 0 }
14370 };
14371
14372 static void
14373 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14374 struct bfd_link_info *link_info)
14375 {
14376 /* BPABI objects are never loaded directly by an OS kernel; they are
14377 processed by a postlinker first, into an OS-specific format. If
14378 the D_PAGED bit is set on the file, BFD will align segments on
14379 page boundaries, so that an OS can directly map the file. With
14380 BPABI objects, that just results in wasted space. In addition,
14381 because we clear the D_PAGED bit, map_sections_to_segments will
14382 recognize that the program headers should not be mapped into any
14383 loadable segment. */
14384 abfd->flags &= ~D_PAGED;
14385 elf32_arm_begin_write_processing (abfd, link_info);
14386 }
14387
14388 static bfd_boolean
14389 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14390 struct bfd_link_info *info)
14391 {
14392 struct elf_segment_map *m;
14393 asection *dynsec;
14394
14395 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14396 segment. However, because the .dynamic section is not marked
14397 with SEC_LOAD, the generic ELF code will not create such a
14398 segment. */
14399 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14400 if (dynsec)
14401 {
14402 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14403 if (m->p_type == PT_DYNAMIC)
14404 break;
14405
14406 if (m == NULL)
14407 {
14408 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14409 m->next = elf_tdata (abfd)->segment_map;
14410 elf_tdata (abfd)->segment_map = m;
14411 }
14412 }
14413
14414 /* Also call the generic arm routine. */
14415 return elf32_arm_modify_segment_map (abfd, info);
14416 }
14417
14418 /* Return address for Ith PLT stub in section PLT, for relocation REL
14419 or (bfd_vma) -1 if it should not be included. */
14420
14421 static bfd_vma
14422 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14423 const arelent *rel ATTRIBUTE_UNUSED)
14424 {
14425 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14426 }
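
/* Worked example (assuming, as the comment in
   elf32_arm_symbian_link_hash_table_create above says, that each Symbian
   PLT entry is one instruction plus one address word, i.e. two 4-byte
   words): entry I then starts at plt->vma + 8 * I, so entry 3, for
   instance, sits 24 bytes into .plt.  */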
14427
14428
14429 #undef elf32_bed
14430 #define elf32_bed elf32_arm_symbian_bed
14431
14432 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14433 will process them and then discard them. */
14434 #undef ELF_DYNAMIC_SEC_FLAGS
14435 #define ELF_DYNAMIC_SEC_FLAGS \
14436 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14437
14438 #undef elf_backend_add_symbol_hook
14439 #undef elf_backend_emit_relocs
14440
14441 #undef bfd_elf32_bfd_link_hash_table_create
14442 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14443 #undef elf_backend_special_sections
14444 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14445 #undef elf_backend_begin_write_processing
14446 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14447 #undef elf_backend_final_write_processing
14448 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14449
14450 #undef elf_backend_modify_segment_map
14451 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14452
14453 /* There is no .got section for BPABI objects, and hence no header. */
14454 #undef elf_backend_got_header_size
14455 #define elf_backend_got_header_size 0
14456
14457 /* Similarly, there is no .got.plt section. */
14458 #undef elf_backend_want_got_plt
14459 #define elf_backend_want_got_plt 0
14460
14461 #undef elf_backend_plt_sym_val
14462 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14463
14464 #undef elf_backend_may_use_rel_p
14465 #define elf_backend_may_use_rel_p 1
14466 #undef elf_backend_may_use_rela_p
14467 #define elf_backend_may_use_rela_p 0
14468 #undef elf_backend_default_use_rela_p
14469 #define elf_backend_default_use_rela_p 0
14470 #undef elf_backend_want_plt_sym
14471 #define elf_backend_want_plt_sym 0
14472 #undef ELF_MAXPAGESIZE
14473 #define ELF_MAXPAGESIZE 0x8000
14474
14475 #include "elf32-target.h"