1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
 33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
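
/* Illustrative note (added for exposition, not part of the original
   source): because NAME is pasted next to ".rel" or ".rela", it must be a
   string literal so that adjacent-literal concatenation applies.  For a
   REL-style link (use_rel nonzero), RELOC_SECTION (htab, ".text") yields
   ".rel.text"; for a RELA-style link it yields ".rela.text".  Here `htab'
   is assumed to be a struct elf32_arm_link_hash_table *.  */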
36
37 /* Return size of a relocation entry. HTAB is the bfd's
 38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
 45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
 52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
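
/* Added usage sketch (not from the original source; `htab', `abfd',
   `contents' and `count' are hypothetical locals): the helpers above are
   typically combined when walking an external relocation section, e.g.

     bfd_byte *p = contents;
     bfd_byte *end = contents + count * RELOC_SIZE (htab);
     Elf_Internal_Rela rel;

     for (; p < end; p += RELOC_SIZE (htab))
       SWAP_RELOC_IN (htab) (abfd, p, &rel);

   so the REL vs. RELA choice is made once, by the use_rel flag.  */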
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
 71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
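/* Added note (for exposition): the table is indexed directly by relocation
   number, so elf32_arm_howto_table_1[R_ARM_PC24].type must equal R_ARM_PC24;
   the EMPTY_HOWTO entries further down exist only to keep later relocations
   at their proper indices.  */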
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
 121 /* Standard 32 bit PC-relative reloc. */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
 136 /* R_ARM_LDR_PC_G0 in AAELF. */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
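
/* Added worked example for the ADDW/SUBW note above (not from the original
   source): an offset that works out to -12 does not fit the 12-bit unsigned
   immediate of ADDW, but the relocation code can rewrite the instruction as
   SUBW with an immediate of 12; describing the field as 13-bit signed
   captures that extra bit of range.  */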
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
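
/* Added note (for exposition): relocation numbers below
   ARRAY_SIZE (elf32_arm_howto_table_1) index the first table directly; the
   four extended relocations starting at R_ARM_RREL32 are rebased into
   elf32_arm_howto_table_2; any other value, including the unallocated range
   in between, yields NULL.  */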
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
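
/* Added note (for exposition): this function is reached through the
   elf_info_to_howto_rel hook defined above; a relocation number outside
   both tables leaves bfd_reloc->howto set to NULL.  */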
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862 /* Note that for some reason a spurious space is tacked
1863 onto the end of the args in some implementations (at least
1864 one, anyway), so strip it off if it is present. */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name and its type, the stub can be found. The
1897 name can be changed. The only requirement is that the %s be present. */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
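
/* For illustration: given a hypothetical function named "foo" and BX
   register 3, the entry-name templates above expand as

     sprintf (buf, THUMB2ARM_GLUE_ENTRY_NAME, "foo");  -> "__foo_from_thumb"
     sprintf (buf, ARM2THUMB_GLUE_ENTRY_NAME, "foo");  -> "__foo_from_arm"
     sprintf (buf, ARM_BX_GLUE_ENTRY_NAME, 3);         -> "__bx_r3"
     sprintf (buf, STUB_ENTRY_NAME, "foo");            -> "__foo_veneer"  */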
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
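
/* Since a Thumb "bx pc" reads the PC as the address of the instruction plus
   4, it transfers control, in ARM state, to the word immediately following
   this two-instruction stub, assuming the stub itself is word-aligned; that
   following word is where the corresponding ARM PLT entry is placed.  */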
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
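
/* For reference, the limits above evaluate (from the address of the branch
   instruction, with the +8/+4 terms accounting for the PC read-ahead) to:

     ARM B/BL      -0x1fffff8 .. +0x2000004  (roughly +/-32MB)
     Thumb-1 BL    -0x3ffffc  .. +0x400002   (roughly +/-4MB)
     Thumb-2 B/BL  -0xfffffc  .. +0x1000002  (roughly +/-16MB)  */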
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack: a Thumb conditional branch, in which the proper condition
2031 is inserted by arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
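
/* For example, ARM_REL_INSN (0xea000000, -8) in one of the templates below
   expands to the initializer

     { 0xea000000, ARM_TYPE, R_ARM_JUMP24, -8 }

   i.e. an ARM branch whose offset field is filled in by applying an
   R_ARM_JUMP24 relocation with addend -8, while DATA_WORD (0, R_ARM_ABS32, 0)
   expands to { 0, DATA_TYPE, R_ARM_ABS32, 0 }, a literal pool word relocated
   to hold the absolute address of the stub target.  */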
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116 blx to reach the stub if necessary. We cannot add into pc;
2117 it is not guaranteed to mode switch (the behaviour differs
2118 between ARMv6 and ARMv7). */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC. */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. The original blx.w instruction
2196 (which switches to ARM mode) is modified to point to this stub, which then
2197 jumps to the real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
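
/* For illustration, the DEF_STUBS expansions above produce the parallel
   definitions

     enum elf32_arm_stub_type {
       arm_stub_none,
       arm_stub_long_branch_any_any,
       ...
     };

     static const stub_def stub_definitions[] = {
       {NULL, 0},
       {elf32_arm_stub_long_branch_any_any,
        ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       ...
     };

   so a stub type can be used directly as an index into stub_definitions;
   the leading {NULL, 0} entry keeps the table in step with the explicit
   arm_stub_none enumerator.  */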
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so they may be created multiple times: whilst relaxing we keep an array of
2391 these entries, which is easy to refresh, and then create a stub for each
2392 potentially erratum-triggering instruction once we've settled on a solution. */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger the
2405 Cortex-A8 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2526 {
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2529
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2532
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2535
2536 /* The size in bytes of section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2538
2539 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2540 and bit 0 is set once the veneer has been populated. */
2541 bfd_vma bx_glue_offset[15];
2542
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2544 veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2546
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2552
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2555
2556 /* Nonzero to output a BE8 image. */
2557 int byteswap_code;
2558
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2561 int target1_is_rel;
2562
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2564 int target2_reloc;
2565
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568 2 = Generate v4 interworking stubs. */
2569 int fix_v4bx;
2570
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2572 int fix_cortex_a8;
2573
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2575 int use_blx;
2576
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2580
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2583
2584 /* Nonzero to force PIC branch veneers. */
2585 int pic_veneer;
2586
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2589
2590 /* The number of bytes in the subsequent PLT entries. */
2591 bfd_size_type plt_entry_size;
2592
2593 /* True if the target system is VxWorks. */
2594 int vxworks_p;
2595
2596 /* True if the target system is Symbian OS. */
2597 int symbian_p;
2598
2599 /* True if the target uses REL relocations. */
2600 int use_rel;
2601
2602 /* Short-cuts to get to dynamic linker sections. */
2603 asection *sgot;
2604 asection *sgotplt;
2605 asection *srelgot;
2606 asection *splt;
2607 asection *srelplt;
2608 asection *sdynbss;
2609 asection *srelbss;
2610
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2612 asection *srelplt2;
2613
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2615 union
2616 {
2617 bfd_signed_vma refcount;
2618 bfd_vma offset;
2619 } tls_ldm_got;
2620
2621 /* Small local sym to section mapping cache. */
2622 struct sym_sec_cache sym_sec;
2623
2624 /* For convenience in allocate_dynrelocs. */
2625 bfd * obfd;
2626
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2629
2630 /* Linker stub bfd. */
2631 bfd *stub_bfd;
2632
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2636
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2639 struct map_stub
2640 {
2641 /* This is the section to which stubs in the group will be
2642 attached. */
2643 asection *link_sec;
2644 /* The stub section. */
2645 asection *stub_sec;
2646 } *stub_group;
2647
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2650 int top_index;
2651 asection **input_list;
2652 };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2720 eh->stub_size = 0;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2723 eh->h = NULL;
2724 eh->id_sec = NULL;
2725 }
2726
2727 return entry;
2728 }
2729
2730 /* Create .got, .got.plt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2732
2733 static bfd_boolean
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2735 {
2736 struct elf32_arm_link_hash_table *htab;
2737
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2741 return TRUE;
2742
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2744 return FALSE;
2745
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2749 abort ();
2750
2751 htab->srelgot = bfd_make_section_with_flags (dynobj,
2752 RELOC_SECTION (htab, ".got"),
2753 (SEC_ALLOC | SEC_LOAD
2754 | SEC_HAS_CONTENTS
2755 | SEC_IN_MEMORY
2756 | SEC_LINKER_CREATED
2757 | SEC_READONLY));
2758 if (htab->srelgot == NULL
2759 || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2))
2760 return FALSE;
2761 return TRUE;
2762 }
2763
2764 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2765 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2766 hash table. */
2767
2768 static bfd_boolean
2769 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2770 {
2771 struct elf32_arm_link_hash_table *htab;
2772
2773 htab = elf32_arm_hash_table (info);
2774 if (!htab->sgot && !create_got_section (dynobj, info))
2775 return FALSE;
2776
2777 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2778 return FALSE;
2779
2780 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2781 htab->srelplt = bfd_get_section_by_name (dynobj,
2782 RELOC_SECTION (htab, ".plt"));
2783 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2784 if (!info->shared)
2785 htab->srelbss = bfd_get_section_by_name (dynobj,
2786 RELOC_SECTION (htab, ".bss"));
2787
2788 if (htab->vxworks_p)
2789 {
2790 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2791 return FALSE;
2792
2793 if (info->shared)
2794 {
2795 htab->plt_header_size = 0;
2796 htab->plt_entry_size
2797 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2798 }
2799 else
2800 {
2801 htab->plt_header_size
2802 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2803 htab->plt_entry_size
2804 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2805 }
2806 }
2807
2808 if (!htab->splt
2809 || !htab->srelplt
2810 || !htab->sdynbss
2811 || (!info->shared && !htab->srelbss))
2812 abort ();
2813
2814 return TRUE;
2815 }
2816
2817 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2818
2819 static void
2820 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2821 struct elf_link_hash_entry *dir,
2822 struct elf_link_hash_entry *ind)
2823 {
2824 struct elf32_arm_link_hash_entry *edir, *eind;
2825
2826 edir = (struct elf32_arm_link_hash_entry *) dir;
2827 eind = (struct elf32_arm_link_hash_entry *) ind;
2828
2829 if (eind->relocs_copied != NULL)
2830 {
2831 if (edir->relocs_copied != NULL)
2832 {
2833 struct elf32_arm_relocs_copied **pp;
2834 struct elf32_arm_relocs_copied *p;
2835
2836 /* Add reloc counts against the indirect sym to the direct sym
2837 list. Merge any entries against the same section. */
2838 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2839 {
2840 struct elf32_arm_relocs_copied *q;
2841
2842 for (q = edir->relocs_copied; q != NULL; q = q->next)
2843 if (q->section == p->section)
2844 {
2845 q->pc_count += p->pc_count;
2846 q->count += p->count;
2847 *pp = p->next;
2848 break;
2849 }
2850 if (q == NULL)
2851 pp = &p->next;
2852 }
2853 *pp = edir->relocs_copied;
2854 }
2855
2856 edir->relocs_copied = eind->relocs_copied;
2857 eind->relocs_copied = NULL;
2858 }
2859
2860 if (ind->root.type == bfd_link_hash_indirect)
2861 {
2862 /* Copy over PLT info. */
2863 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2864 eind->plt_thumb_refcount = 0;
2865 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2866 eind->plt_maybe_thumb_refcount = 0;
2867
2868 if (dir->got.refcount <= 0)
2869 {
2870 edir->tls_type = eind->tls_type;
2871 eind->tls_type = GOT_UNKNOWN;
2872 }
2873 }
2874
2875 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2876 }
2877
2878 /* Create an ARM elf linker hash table. */
2879
2880 static struct bfd_link_hash_table *
2881 elf32_arm_link_hash_table_create (bfd *abfd)
2882 {
2883 struct elf32_arm_link_hash_table *ret;
2884 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2885
2886 ret = bfd_malloc (amt);
2887 if (ret == NULL)
2888 return NULL;
2889
2890 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2891 elf32_arm_link_hash_newfunc,
2892 sizeof (struct elf32_arm_link_hash_entry)))
2893 {
2894 free (ret);
2895 return NULL;
2896 }
2897
2898 ret->sgot = NULL;
2899 ret->sgotplt = NULL;
2900 ret->srelgot = NULL;
2901 ret->splt = NULL;
2902 ret->srelplt = NULL;
2903 ret->sdynbss = NULL;
2904 ret->srelbss = NULL;
2905 ret->srelplt2 = NULL;
2906 ret->thumb_glue_size = 0;
2907 ret->arm_glue_size = 0;
2908 ret->bx_glue_size = 0;
2909 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2910 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2911 ret->vfp11_erratum_glue_size = 0;
2912 ret->num_vfp11_fixes = 0;
2913 ret->fix_cortex_a8 = 0;
2914 ret->bfd_of_glue_owner = NULL;
2915 ret->byteswap_code = 0;
2916 ret->target1_is_rel = 0;
2917 ret->target2_reloc = R_ARM_NONE;
2918 #ifdef FOUR_WORD_PLT
2919 ret->plt_header_size = 16;
2920 ret->plt_entry_size = 16;
2921 #else
2922 ret->plt_header_size = 20;
2923 ret->plt_entry_size = 12;
2924 #endif
2925 ret->fix_v4bx = 0;
2926 ret->use_blx = 0;
2927 ret->vxworks_p = 0;
2928 ret->symbian_p = 0;
2929 ret->use_rel = 1;
2930 ret->sym_sec.abfd = NULL;
2931 ret->obfd = abfd;
2932 ret->tls_ldm_got.refcount = 0;
2933 ret->stub_bfd = NULL;
2934 ret->add_stub_section = NULL;
2935 ret->layout_sections_again = NULL;
2936 ret->stub_group = NULL;
2937 ret->bfd_count = 0;
2938 ret->top_index = 0;
2939 ret->input_list = NULL;
2940
2941 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2942 sizeof (struct elf32_arm_stub_hash_entry)))
2943 {
2944 free (ret);
2945 return NULL;
2946 }
2947
2948 return &ret->root.root;
2949 }
2950
2951 /* Free the derived linker hash table. */
2952
2953 static void
2954 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2955 {
2956 struct elf32_arm_link_hash_table *ret
2957 = (struct elf32_arm_link_hash_table *) hash;
2958
2959 bfd_hash_table_free (&ret->stub_hash_table);
2960 _bfd_generic_link_hash_table_free (hash);
2961 }
2962
2963 /* Determine if we're dealing with a Thumb only architecture. */
2964
2965 static bfd_boolean
2966 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2967 {
2968 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2969 Tag_CPU_arch);
2970 int profile;
2971
2972 if (arch != TAG_CPU_ARCH_V7)
2973 return FALSE;
2974
2975 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2976 Tag_CPU_arch_profile);
2977
2978 return profile == 'M';
2979 }
2980
2981 /* Determine if we're dealing with a Thumb-2 object. */
2982
2983 static bfd_boolean
2984 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2985 {
2986 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2987 Tag_CPU_arch);
2988 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2989 }
2990
2991 static bfd_boolean
2992 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2993 {
2994 switch (stub_type)
2995 {
2996 case arm_stub_long_branch_thumb_only:
2997 case arm_stub_long_branch_v4t_thumb_arm:
2998 case arm_stub_short_branch_v4t_thumb_arm:
2999 case arm_stub_long_branch_v4t_thumb_arm_pic:
3000 case arm_stub_long_branch_thumb_only_pic:
3001 return TRUE;
3002 case arm_stub_none:
3003 BFD_FAIL ();
3004 return FALSE;
3006 default:
3007 return FALSE;
3008 }
3009 }
3010
3011 /* Determine the type of stub needed, if any, for a call. */
3012
3013 static enum elf32_arm_stub_type
3014 arm_type_of_stub (struct bfd_link_info *info,
3015 asection *input_sec,
3016 const Elf_Internal_Rela *rel,
3017 unsigned char st_type,
3018 struct elf32_arm_link_hash_entry *hash,
3019 bfd_vma destination,
3020 asection *sym_sec,
3021 bfd *input_bfd,
3022 const char *name)
3023 {
3024 bfd_vma location;
3025 bfd_signed_vma branch_offset;
3026 unsigned int r_type;
3027 struct elf32_arm_link_hash_table * globals;
3028 int thumb2;
3029 int thumb_only;
3030 enum elf32_arm_stub_type stub_type = arm_stub_none;
3031 int use_plt = 0;
3032
3033 /* We don't know the actual type of destination in case it is of
3034 type STT_SECTION: give up. */
3035 if (st_type == STT_SECTION)
3036 return stub_type;
3037
3038 globals = elf32_arm_hash_table (info);
3039
3040 thumb_only = using_thumb_only (globals);
3041
3042 thumb2 = using_thumb2 (globals);
3043
3044 /* Determine where the call point is. */
3045 location = (input_sec->output_offset
3046 + input_sec->output_section->vma
3047 + rel->r_offset);
3048
3049 branch_offset = (bfd_signed_vma)(destination - location);
3050
3051 r_type = ELF32_R_TYPE (rel->r_info);
3052
3053 /* Keep a simpler condition, for the sake of clarity. */
3054 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3055 {
3056 use_plt = 1;
3057 /* Note when dealing with PLT entries: the main PLT stub is in
3058 ARM mode, so if the branch is in Thumb mode, another
3059 Thumb->ARM stub will be inserted later just before the ARM
3060 PLT stub. We don't take this extra distance into account
3061 here, because if a long branch stub is needed, we'll add a
3062 Thumb->ARM one and branch directly to the ARM PLT entry;
3063 this avoids spreading offset corrections across several
3064 places. */
3065 }
3066
3067 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3068 {
3069 /* Handle cases where:
3070 - this call goes too far (different Thumb/Thumb2 max
3071 distance)
3072 - it's a Thumb->Arm call and blx is not available, or it's a
3073 Thumb->Arm branch (not bl). A stub is needed in this case,
3074 but only if this call is not through a PLT entry. Indeed,
3075 PLT stubs handle mode switching already.
3076 */
3077 if ((!thumb2
3078 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3079 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3080 || (thumb2
3081 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3082 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3083 || ((st_type != STT_ARM_TFUNC)
3084 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3085 || (r_type == R_ARM_THM_JUMP24))
3086 && !use_plt))
3087 {
3088 if (st_type == STT_ARM_TFUNC)
3089 {
3090 /* Thumb to thumb. */
3091 if (!thumb_only)
3092 {
3093 stub_type = (info->shared | globals->pic_veneer)
3094 /* PIC stubs. */
3095 ? ((globals->use_blx
3096 && (r_type == R_ARM_THM_CALL))
3097 /* V5T and above. Stub starts with ARM code, so
3098 we must be able to switch mode before
3099 reaching it, which is only possible for 'bl'
3100 (i.e. R_ARM_THM_CALL relocation). */
3101 ? arm_stub_long_branch_any_thumb_pic
3102 /* On V4T, use Thumb code only. */
3103 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3104
3105 /* non-PIC stubs. */
3106 : ((globals->use_blx
3107 && (r_type == R_ARM_THM_CALL))
3108 /* V5T and above. */
3109 ? arm_stub_long_branch_any_any
3110 /* V4T. */
3111 : arm_stub_long_branch_v4t_thumb_thumb);
3112 }
3113 else
3114 {
3115 stub_type = (info->shared | globals->pic_veneer)
3116 /* PIC stub. */
3117 ? arm_stub_long_branch_thumb_only_pic
3118 /* non-PIC stub. */
3119 : arm_stub_long_branch_thumb_only;
3120 }
3121 }
3122 else
3123 {
3124 /* Thumb to arm. */
3125 if (sym_sec != NULL
3126 && sym_sec->owner != NULL
3127 && !INTERWORK_FLAG (sym_sec->owner))
3128 {
3129 (*_bfd_error_handler)
3130 (_("%B(%s): warning: interworking not enabled.\n"
3131 " first occurrence: %B: Thumb call to ARM"),
3132 sym_sec->owner, input_bfd, name);
3133 }
3134
3135 stub_type = (info->shared | globals->pic_veneer)
3136 /* PIC stubs. */
3137 ? ((globals->use_blx
3138 && (r_type == R_ARM_THM_CALL))
3139 /* V5T and above. */
3140 ? arm_stub_long_branch_any_arm_pic
3141 /* V4T PIC stub. */
3142 : arm_stub_long_branch_v4t_thumb_arm_pic)
3143
3144 /* non-PIC stubs. */
3145 : ((globals->use_blx
3146 && (r_type == R_ARM_THM_CALL))
3147 /* V5T and above. */
3148 ? arm_stub_long_branch_any_any
3149 /* V4T. */
3150 : arm_stub_long_branch_v4t_thumb_arm);
3151
3152 /* Handle v4t short branches. */
3153 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3154 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3155 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3156 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3157 }
3158 }
3159 }
3160 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3161 {
3162 if (st_type == STT_ARM_TFUNC)
3163 {
3164 /* Arm to thumb. */
3165
3166 if (sym_sec != NULL
3167 && sym_sec->owner != NULL
3168 && !INTERWORK_FLAG (sym_sec->owner))
3169 {
3170 (*_bfd_error_handler)
3171 (_("%B(%s): warning: interworking not enabled.\n"
3172 " first occurrence: %B: ARM call to Thumb"),
3173 sym_sec->owner, input_bfd, name);
3174 }
3175
3176 /* We have an extra 2 bytes of reach because of
3177 the mode change (bit 24 (H) of the BLX encoding). */
3178 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3179 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3180 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3181 || (r_type == R_ARM_JUMP24)
3182 || (r_type == R_ARM_PLT32))
3183 {
3184 stub_type = (info->shared | globals->pic_veneer)
3185 /* PIC stubs. */
3186 ? ((globals->use_blx)
3187 /* V5T and above. */
3188 ? arm_stub_long_branch_any_thumb_pic
3189 /* V4T stub. */
3190 : arm_stub_long_branch_v4t_arm_thumb_pic)
3191
3192 /* non-PIC stubs. */
3193 : ((globals->use_blx)
3194 /* V5T and above. */
3195 ? arm_stub_long_branch_any_any
3196 /* V4T. */
3197 : arm_stub_long_branch_v4t_arm_thumb);
3198 }
3199 }
3200 else
3201 {
3202 /* Arm to arm. */
3203 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3204 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3205 {
3206 stub_type = (info->shared | globals->pic_veneer)
3207 /* PIC stubs. */
3208 ? arm_stub_long_branch_any_arm_pic
3209 /* non-PIC stubs. */
3210 : arm_stub_long_branch_any_any;
3211 }
3212 }
3213 }
3214
3215 return stub_type;
3216 }
3217
3218 /* Build a name for an entry in the stub hash table. */
3219
3220 static char *
3221 elf32_arm_stub_name (const asection *input_section,
3222 const asection *sym_sec,
3223 const struct elf32_arm_link_hash_entry *hash,
3224 const Elf_Internal_Rela *rel)
3225 {
3226 char *stub_name;
3227 bfd_size_type len;
3228
3229 if (hash)
3230 {
3231 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3232 stub_name = bfd_malloc (len);
3233 if (stub_name != NULL)
3234 sprintf (stub_name, "%08x_%s+%x",
3235 input_section->id & 0xffffffff,
3236 hash->root.root.root.string,
3237 (int) rel->r_addend & 0xffffffff);
3238 }
3239 else
3240 {
3241 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3242 stub_name = bfd_malloc (len);
3243 if (stub_name != NULL)
3244 sprintf (stub_name, "%08x_%x:%x+%x",
3245 input_section->id & 0xffffffff,
3246 sym_sec->id & 0xffffffff,
3247 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3248 (int) rel->r_addend & 0xffffffff);
3249 }
3250
3251 return stub_name;
3252 }
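
/* For example (with hypothetical section ids), a stub reached via the global
   symbol "printf" from the section with id 0x1a and addend 0 is named
   "0000001a_printf+0", while a stub for local symbol index 5 in the section
   with id 0x2c is named "0000001a_2c:5+0".  */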
3253
3254 /* Look up an entry in the stub hash. Stub entries are cached because
3255 creating the stub name takes a bit of time. */
3256
3257 static struct elf32_arm_stub_hash_entry *
3258 elf32_arm_get_stub_entry (const asection *input_section,
3259 const asection *sym_sec,
3260 struct elf_link_hash_entry *hash,
3261 const Elf_Internal_Rela *rel,
3262 struct elf32_arm_link_hash_table *htab)
3263 {
3264 struct elf32_arm_stub_hash_entry *stub_entry;
3265 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3266 const asection *id_sec;
3267
3268 if ((input_section->flags & SEC_CODE) == 0)
3269 return NULL;
3270
3271 /* If this input section is part of a group of sections sharing one
3272 stub section, then use the id of the first section in the group.
3273 Stub names need to include a section id, as there may well be
3274 more than one stub used to reach say, printf, and we need to
3275 distinguish between them. */
3276 id_sec = htab->stub_group[input_section->id].link_sec;
3277
3278 if (h != NULL && h->stub_cache != NULL
3279 && h->stub_cache->h == h
3280 && h->stub_cache->id_sec == id_sec)
3281 {
3282 stub_entry = h->stub_cache;
3283 }
3284 else
3285 {
3286 char *stub_name;
3287
3288 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3289 if (stub_name == NULL)
3290 return NULL;
3291
3292 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3293 stub_name, FALSE, FALSE);
3294 if (h != NULL)
3295 h->stub_cache = stub_entry;
3296
3297 free (stub_name);
3298 }
3299
3300 return stub_entry;
3301 }
3302
3303 /* Find or create a stub section. Returns a pointer to the stub section, and
3304 the section to which the stub section will be attached (in *LINK_SEC_P).
3305 LINK_SEC_P may be NULL. */
3306
3307 static asection *
3308 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3309 struct elf32_arm_link_hash_table *htab)
3310 {
3311 asection *link_sec;
3312 asection *stub_sec;
3313
3314 link_sec = htab->stub_group[section->id].link_sec;
3315 stub_sec = htab->stub_group[section->id].stub_sec;
3316 if (stub_sec == NULL)
3317 {
3318 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3319 if (stub_sec == NULL)
3320 {
3321 size_t namelen;
3322 bfd_size_type len;
3323 char *s_name;
3324
3325 namelen = strlen (link_sec->name);
3326 len = namelen + sizeof (STUB_SUFFIX);
3327 s_name = bfd_alloc (htab->stub_bfd, len);
3328 if (s_name == NULL)
3329 return NULL;
3330
3331 memcpy (s_name, link_sec->name, namelen);
3332 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3333 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3334 if (stub_sec == NULL)
3335 return NULL;
3336 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3337 }
3338 htab->stub_group[section->id].stub_sec = stub_sec;
3339 }
3340
3341 if (link_sec_p)
3342 *link_sec_p = link_sec;
3343
3344 return stub_sec;
3345 }
3346
3347 /* Add a new stub entry to the stub hash. Not all fields of the new
3348 stub entry are initialised. */
3349
3350 static struct elf32_arm_stub_hash_entry *
3351 elf32_arm_add_stub (const char *stub_name,
3352 asection *section,
3353 struct elf32_arm_link_hash_table *htab)
3354 {
3355 asection *link_sec;
3356 asection *stub_sec;
3357 struct elf32_arm_stub_hash_entry *stub_entry;
3358
3359 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3360 if (stub_sec == NULL)
3361 return NULL;
3362
3363 /* Enter this entry into the linker stub hash table. */
3364 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3365 TRUE, FALSE);
3366 if (stub_entry == NULL)
3367 {
3368 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3369 section->owner,
3370 stub_name);
3371 return NULL;
3372 }
3373
3374 stub_entry->stub_sec = stub_sec;
3375 stub_entry->stub_offset = 0;
3376 stub_entry->id_sec = link_sec;
3377
3378 return stub_entry;
3379 }
3380
3381 /* Store an Arm insn into an output section not processed by
3382 elf32_arm_write_section. */
3383
3384 static void
3385 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3386 bfd * output_bfd, bfd_vma val, void * ptr)
3387 {
3388 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3389 bfd_putl32 (val, ptr);
3390 else
3391 bfd_putb32 (val, ptr);
3392 }
3393
3394 /* Store a 16-bit Thumb insn into an output section not processed by
3395 elf32_arm_write_section. */
3396
3397 static void
3398 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3399 bfd * output_bfd, bfd_vma val, void * ptr)
3400 {
3401 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3402 bfd_putl16 (val, ptr);
3403 else
3404 bfd_putb16 (val, ptr);
3405 }
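
/* In both of the helpers above, instructions are written in the output's
   byte order unless byteswap_code is set, in which case they are written in
   the opposite order (in practice, little-endian code inside a big-endian
   BE8 image); data words are unaffected and keep the output byte order.  */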
3406
3407 static bfd_reloc_status_type elf32_arm_final_link_relocate
3408 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3409 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3410 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3411
3412 static bfd_boolean
3413 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3414 void * in_arg)
3415 {
3416 #define MAXRELOCS 2
3417 struct elf32_arm_stub_hash_entry *stub_entry;
3418 struct bfd_link_info *info;
3419 struct elf32_arm_link_hash_table *htab;
3420 asection *stub_sec;
3421 bfd *stub_bfd;
3422 bfd_vma stub_addr;
3423 bfd_byte *loc;
3424 bfd_vma sym_value;
3425 int template_size;
3426 int size;
3427 const insn_sequence *template;
3428 int i;
3429 struct elf32_arm_link_hash_table * globals;
3430 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3431 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3432 int nrelocs = 0;
3433
3434 /* Massage our args to the form they really have. */
3435 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3436 info = (struct bfd_link_info *) in_arg;
3437
3438 globals = elf32_arm_hash_table (info);
3439
3440 htab = elf32_arm_hash_table (info);
3441 stub_sec = stub_entry->stub_sec;
3442
3443 /* Make a note of the offset within the stubs for this entry. */
3444 stub_entry->stub_offset = stub_sec->size;
3445 loc = stub_sec->contents + stub_entry->stub_offset;
3446
3447 stub_bfd = stub_sec->owner;
3448
3449 /* This is the address of the start of the stub. */
3450 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3451 + stub_entry->stub_offset;
3452
3453 /* This is the address of the stub destination. */
3454 sym_value = (stub_entry->target_value
3455 + stub_entry->target_section->output_offset
3456 + stub_entry->target_section->output_section->vma);
3457
3458 template = stub_entry->stub_template;
3459 template_size = stub_entry->stub_template_size;
3460
3461 size = 0;
3462 for (i = 0; i < template_size; i++)
3463 {
3464 switch (template[i].type)
3465 {
3466 case THUMB16_TYPE:
3467 {
3468 bfd_vma data = template[i].data;
3469 if (template[i].reloc_addend != 0)
3470 {
3471 /* We've borrowed the reloc_addend field to mean we should
3472 insert a condition code into this (Thumb-1 branch)
3473 instruction. See THUMB16_BCOND_INSN. */
3474 BFD_ASSERT ((data & 0xff00) == 0xd000);
3475 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3476 }
3477 put_thumb_insn (globals, stub_bfd, data, loc + size);
3478 size += 2;
3479 }
3480 break;
3481
3482 case THUMB32_TYPE:
3483 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3484 loc + size);
3485 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3486 loc + size + 2);
3487 if (template[i].r_type != R_ARM_NONE)
3488 {
3489 stub_reloc_idx[nrelocs] = i;
3490 stub_reloc_offset[nrelocs++] = size;
3491 }
3492 size += 4;
3493 break;
3494
3495 case ARM_TYPE:
3496 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3497 /* Handle cases where the target is encoded within the
3498 instruction. */
3499 if (template[i].r_type == R_ARM_JUMP24)
3500 {
3501 stub_reloc_idx[nrelocs] = i;
3502 stub_reloc_offset[nrelocs++] = size;
3503 }
3504 size += 4;
3505 break;
3506
3507 case DATA_TYPE:
3508 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3509 stub_reloc_idx[nrelocs] = i;
3510 stub_reloc_offset[nrelocs++] = size;
3511 size += 4;
3512 break;
3513
3514 default:
3515 BFD_FAIL ();
3516 return FALSE;
3517 }
3518 }
3519
3520 stub_sec->size += size;
3521
3522 /* Stub size has already been computed in arm_size_one_stub. Check
3523 consistency. */
3524 BFD_ASSERT (size == stub_entry->stub_size);
3525
3526 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3527 if (stub_entry->st_type == STT_ARM_TFUNC)
3528 sym_value |= 1;
3529
3530 /* Assume there are between one and MAXRELOCS entries to relocate
3531 in each stub. */
3532 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3533
3534 for (i = 0; i < nrelocs; i++)
3535 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3536 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3537 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3538 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3539 {
3540 Elf_Internal_Rela rel;
3541 bfd_boolean unresolved_reloc;
3542 char *error_message;
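/* Flag the branch target as Thumb (STT_ARM_TFUNC) for everything
   except R_ARM_THM_XPC22, the Thumb BLX relocation, whose target
   is ARM code.  */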
3543 int sym_flags
3544 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3545 ? STT_ARM_TFUNC : 0;
3546 bfd_vma points_to = sym_value + stub_entry->target_addend;
3547
3548 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3549 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3550 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3551
3552 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3553 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3554 template should refer back to the instruction after the original
3555 branch. */
3556 points_to = sym_value;
3557
3558 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3559 properly. We should probably use elf32_arm_final_link_relocate
3560 unconditionally, rather than only for the relocations listed in
3561 the enclosing conditional, for the sake of consistency. */
3562 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3563 (template[stub_reloc_idx[i]].r_type),
3564 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3565 points_to, info, stub_entry->target_section, "", sym_flags,
3566 (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc,
3567 &error_message);
3568 }
3569 else
3570 {
3571 _bfd_final_link_relocate (elf32_arm_howto_from_type
3572 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3573 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3574 sym_value + stub_entry->target_addend,
3575 template[stub_reloc_idx[i]].reloc_addend);
3576 }
3577
3578 return TRUE;
3579 #undef MAXRELOCS
3580 }
3581
3582 /* Calculate the template, template size and instruction size for a stub.
3583 Return value is the instruction size. */
3584
3585 static unsigned int
3586 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3587 const insn_sequence **stub_template,
3588 int *stub_template_size)
3589 {
3590 const insn_sequence *template = NULL;
3591 int template_size = 0, i;
3592 unsigned int size;
3593
3594 template = stub_definitions[stub_type].template;
3595 template_size = stub_definitions[stub_type].template_size;
3596
3597 size = 0;
3598 for (i = 0; i < template_size; i++)
3599 {
3600 switch (template[i].type)
3601 {
3602 case THUMB16_TYPE:
3603 size += 2;
3604 break;
3605
3606 case ARM_TYPE:
3607 case THUMB32_TYPE:
3608 case DATA_TYPE:
3609 size += 4;
3610 break;
3611
3612 default:
3613 BFD_FAIL ();
3614 return FALSE;
3615 }
3616 }
3617
3618 if (stub_template)
3619 *stub_template = template;
3620
3621 if (stub_template_size)
3622 *stub_template_size = template_size;
3623
3624 return size;
3625 }
3626
3627 /* As above, but don't actually build the stub. Just bump offset so
3628 we know stub section sizes. */
3629
3630 static bfd_boolean
3631 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3632 void * in_arg)
3633 {
3634 struct elf32_arm_stub_hash_entry *stub_entry;
3635 struct elf32_arm_link_hash_table *htab;
3636 const insn_sequence *template;
3637 int template_size, size;
3638
3639 /* Massage our args to the form they really have. */
3640 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3641 htab = (struct elf32_arm_link_hash_table *) in_arg;
3642
3643 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3644 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3645
3646 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3647 &template_size);
3648
3649 stub_entry->stub_size = size;
3650 stub_entry->stub_template = template;
3651 stub_entry->stub_template_size = template_size;
3652
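/* Pad the stub to a multiple of 8 bytes, keeping consecutive stubs
   8-byte aligned within the stub section (assuming the section itself
   is suitably aligned).  */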
3653 size = (size + 7) & ~7;
3654 stub_entry->stub_sec->size += size;
3655
3656 return TRUE;
3657 }
3658
3659 /* External entry points for sizing and building linker stubs. */
3660
3661 /* Set up various things so that we can make a list of input sections
3662 for each output section included in the link. Returns -1 on error,
3663 0 when no stubs will be needed, and 1 on success. */
3664
3665 int
3666 elf32_arm_setup_section_lists (bfd *output_bfd,
3667 struct bfd_link_info *info)
3668 {
3669 bfd *input_bfd;
3670 unsigned int bfd_count;
3671 int top_id, top_index;
3672 asection *section;
3673 asection **input_list, **list;
3674 bfd_size_type amt;
3675 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3676
3677 if (! is_elf_hash_table (htab))
3678 return 0;
3679
3680 /* Count the number of input BFDs and find the top input section id. */
3681 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3682 input_bfd != NULL;
3683 input_bfd = input_bfd->link_next)
3684 {
3685 bfd_count += 1;
3686 for (section = input_bfd->sections;
3687 section != NULL;
3688 section = section->next)
3689 {
3690 if (top_id < section->id)
3691 top_id = section->id;
3692 }
3693 }
3694 htab->bfd_count = bfd_count;
3695
3696 amt = sizeof (struct map_stub) * (top_id + 1);
3697 htab->stub_group = bfd_zmalloc (amt);
3698 if (htab->stub_group == NULL)
3699 return -1;
3700
3701 /* We can't use output_bfd->section_count here to find the top output
3702 section index as some sections may have been removed, and
3703 _bfd_strip_section_from_output doesn't renumber the indices. */
3704 for (section = output_bfd->sections, top_index = 0;
3705 section != NULL;
3706 section = section->next)
3707 {
3708 if (top_index < section->index)
3709 top_index = section->index;
3710 }
3711
3712 htab->top_index = top_index;
3713 amt = sizeof (asection *) * (top_index + 1);
3714 input_list = bfd_malloc (amt);
3715 htab->input_list = input_list;
3716 if (input_list == NULL)
3717 return -1;
3718
3719 /* For sections we aren't interested in, mark their entries with a
3720 value we can check later. */
3721 list = input_list + top_index;
3722 do
3723 *list = bfd_abs_section_ptr;
3724 while (list-- != input_list);
3725
3726 for (section = output_bfd->sections;
3727 section != NULL;
3728 section = section->next)
3729 {
3730 if ((section->flags & SEC_CODE) != 0)
3731 input_list[section->index] = NULL;
3732 }
3733
3734 return 1;
3735 }
3736
3737 /* The linker repeatedly calls this function for each input section,
3738 in the order that input sections are linked into output sections.
3739 Build lists of input sections to determine groupings between which
3740 we may insert linker stubs. */
3741
3742 void
3743 elf32_arm_next_input_section (struct bfd_link_info *info,
3744 asection *isec)
3745 {
3746 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3747
3748 if (isec->output_section->index <= htab->top_index)
3749 {
3750 asection **list = htab->input_list + isec->output_section->index;
3751
3752 if (*list != bfd_abs_section_ptr)
3753 {
3754 /* Steal the link_sec pointer for our list. */
3755 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3756 /* This happens to make the list in reverse order,
3757 which we reverse later. */
3758 PREV_SEC (isec) = *list;
3759 *list = isec;
3760 }
3761 }
3762 }
3763
3764 /* See whether we can group stub sections together. Grouping stub
3765 sections may result in fewer stubs. More importantly, we need to
3766 put all .init* and .fini* stubs at the end of the .init or
3767 .fini output sections respectively, because glibc splits the
3768 _init and _fini functions into multiple parts. Putting a stub in
3769 the middle of a function is not a good idea. */
3770
3771 static void
3772 group_sections (struct elf32_arm_link_hash_table *htab,
3773 bfd_size_type stub_group_size,
3774 bfd_boolean stubs_always_after_branch)
3775 {
3776 asection **list = htab->input_list;
3777
3778 do
3779 {
3780 asection *tail = *list;
3781 asection *head;
3782
3783 if (tail == bfd_abs_section_ptr)
3784 continue;
3785
3786 /* Reverse the list: we must avoid placing stubs at the
3787 beginning of the section because the beginning of the text
3788 section may be required for an interrupt vector in bare metal
3789 code. */
3790 #define NEXT_SEC PREV_SEC
3791 head = NULL;
3792 while (tail != NULL)
3793 {
3794 /* Pop from tail. */
3795 asection *item = tail;
3796 tail = PREV_SEC (item);
3797
3798 /* Push on head. */
3799 NEXT_SEC (item) = head;
3800 head = item;
3801 }
3802
3803 while (head != NULL)
3804 {
3805 asection *curr;
3806 asection *next;
3807 bfd_vma stub_group_start = head->output_offset;
3808 bfd_vma end_of_next;
3809
3810 curr = head;
3811 while (NEXT_SEC (curr) != NULL)
3812 {
3813 next = NEXT_SEC (curr);
3814 end_of_next = next->output_offset + next->size;
3815 if (end_of_next - stub_group_start >= stub_group_size)
3816 /* End of NEXT is too far from start, so stop. */
3817 break;
3818 /* Add NEXT to the group. */
3819 curr = next;
3820 }
3821
3822 /* OK, the size from the start to the start of CURR is less
3823 than stub_group_size and thus can be handled by one stub
3824 section. (Or the head section is itself larger than
3825 stub_group_size, in which case we may be toast.)
3826 We should really be keeping track of the total size of
3827 stubs added here, as stubs contribute to the final output
3828 section size. */
3829 do
3830 {
3831 next = NEXT_SEC (head);
3832 /* Set up this stub group. */
3833 htab->stub_group[head->id].link_sec = curr;
3834 }
3835 while (head != curr && (head = next) != NULL);
3836
3837 /* But wait, there's more! Input sections up to stub_group_size
3838 bytes after the stub section can be handled by it too. */
3839 if (!stubs_always_after_branch)
3840 {
3841 stub_group_start = curr->output_offset + curr->size;
3842
3843 while (next != NULL)
3844 {
3845 end_of_next = next->output_offset + next->size;
3846 if (end_of_next - stub_group_start >= stub_group_size)
3847 /* End of NEXT is too far from stubs, so stop. */
3848 break;
3849 /* Add NEXT to the stub group. */
3850 head = next;
3851 next = NEXT_SEC (head);
3852 htab->stub_group[head->id].link_sec = curr;
3853 }
3854 }
3855 head = next;
3856 }
3857 }
3858 while (list++ != htab->input_list + htab->top_index);
3859
3860 free (htab->input_list);
3861 #undef PREV_SEC
3862 #undef NEXT_SEC
3863 }
3864
3865 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3866 erratum fix. */
3867
3868 static int
3869 a8_reloc_compare (const void *a, const void *b)
3870 {
3871 const struct a8_erratum_reloc *ra = a, *rb = b;
3872
3873 if (ra->from < rb->from)
3874 return -1;
3875 else if (ra->from > rb->from)
3876 return 1;
3877 else
3878 return 0;
3879 }
3880
3881 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3882 const char *, char **);
3883
3884 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3885 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3886 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Return 1 if an error occurs, 0
3887 otherwise. */
3888
3889 static int
3890 cortex_a8_erratum_scan (bfd *input_bfd, struct bfd_link_info *info,
3891 struct a8_erratum_fix **a8_fixes_p,
3892 unsigned int *num_a8_fixes_p,
3893 unsigned int *a8_fix_table_size_p,
3894 struct a8_erratum_reloc *a8_relocs,
3895 unsigned int num_a8_relocs)
3896 {
3897 asection *section;
3898 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3899 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3900 unsigned int num_a8_fixes = *num_a8_fixes_p;
3901 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3902
3903 for (section = input_bfd->sections;
3904 section != NULL;
3905 section = section->next)
3906 {
3907 bfd_byte *contents = NULL;
3908 struct _arm_elf_section_data *sec_data;
3909 unsigned int span;
3910 bfd_vma base_vma;
3911
3912 if (elf_section_type (section) != SHT_PROGBITS
3913 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3914 || (section->flags & SEC_EXCLUDE) != 0
3915 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3916 || (section->output_section == bfd_abs_section_ptr))
3917 continue;
3918
3919 base_vma = section->output_section->vma + section->output_offset;
3920
3921 if (elf_section_data (section)->this_hdr.contents != NULL)
3922 contents = elf_section_data (section)->this_hdr.contents;
3923 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3924 return 1;
3925
3926 sec_data = elf32_arm_section_data (section);
3927
3928 for (span = 0; span < sec_data->mapcount; span++)
3929 {
3930 unsigned int span_start = sec_data->map[span].vma;
3931 unsigned int span_end = (span == sec_data->mapcount - 1)
3932 ? section->size : sec_data->map[span + 1].vma;
3933 unsigned int i;
3934 char span_type = sec_data->map[span].type;
3935 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3936
3937 if (span_type != 't')
3938 continue;
3939
3940 /* Span is entirely within a single 4KB region: skip scanning. */
3941 if (((base_vma + span_start) & ~0xfff)
3942 == ((base_vma + span_end) & ~0xfff))
3943 continue;
3944
3945 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3946
3947 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3948 * The branch target is in the same 4KB region as the
3949 first half of the branch.
3950 * The instruction before the branch is a 32-bit
3951 length non-branch instruction.
3952 */
3953
3954 for (i = span_start; i < span_end;)
3955 {
3956 unsigned int insn = bfd_getl16 (&contents[i]);
3957 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3958 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3959
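/* The first halfword of a 32-bit Thumb-2 instruction has bits
   [15:13] == 0b111 and bits [12:11] != 0b00.  */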
3960 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3961 insn_32bit = TRUE;
3962
3963 if (insn_32bit)
3964 {
3965 /* Load the rest of the insn (in manual-friendly order). */
3966 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3967
3968 /* Encoding T4: B<c>.W. */
3969 is_b = (insn & 0xf800d000) == 0xf0009000;
3970 /* Encoding T1: BL<c>.W. */
3971 is_bl = (insn & 0xf800d000) == 0xf000d000;
3972 /* Encoding T2: BLX<c>.W. */
3973 is_blx = (insn & 0xf800d000) == 0xf000c000;
3974 /* Encoding T3: B<c>.W (not permitted in IT block). */
3975 is_bcc = (insn & 0xf800d000) == 0xf0008000
3976 && (insn & 0x07f00000) != 0x03800000;
3977 }
3978
3979 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3980
3981 if (((base_vma + i) & 0xfff) == 0xffe && insn_32bit
3982 && is_32bit_branch && last_was_32bit && !last_was_branch)
3983 {
3984 bfd_vma offset;
3985 bfd_boolean force_target_arm = FALSE;
3986 bfd_boolean force_target_thumb = FALSE;
3987 bfd_vma target;
3988 enum elf32_arm_stub_type stub_type = arm_stub_none;
3989 struct a8_erratum_reloc key, *found;
3990
3991 key.from = base_vma + i;
3992 found = bsearch (&key, a8_relocs, num_a8_relocs,
3993 sizeof (struct a8_erratum_reloc),
3994 &a8_reloc_compare);
3995
3996 if (found)
3997 {
3998 char *error_message = NULL;
3999 struct elf_link_hash_entry *entry;
4000
4001 /* We don't care about the error returned from this
4002 function, only whether there is glue or not. */
4003 entry = find_thumb_glue (info, found->sym_name,
4004 &error_message);
4005
4006 if (entry)
4007 found->non_a8_stub = TRUE;
4008
4009 if (found->r_type == R_ARM_THM_CALL
4010 && found->st_type != STT_ARM_TFUNC)
4011 force_target_arm = TRUE;
4012 else if (found->r_type == R_ARM_THM_CALL
4013 && found->st_type == STT_ARM_TFUNC)
4014 force_target_thumb = TRUE;
4015 }
4016
4017 /* Check if we have an offending branch instruction. */
4018
4019 if (found && found->non_a8_stub)
4020 /* We've already made a stub for this instruction, e.g.
4021 it's a long branch or a Thumb->ARM stub. Assume that stub
4022 will suffice to work around the A8 erratum (see the setting
4023 of stubs_always_after_branch in elf32_arm_size_stubs). */
4024 ;
4025 else if (is_bcc)
4026 {
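/* Reconstruct the branch offset from encoding T3:
   SignExtend(S:J2:J1:imm6:imm11:'0'), where imm11, J1 and J2 come
   from the second halfword and S and imm6 from the first.  */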
4027 offset = (insn & 0x7ff) << 1;
4028 offset |= (insn & 0x3f0000) >> 4;
4029 offset |= (insn & 0x2000) ? 0x40000 : 0;
4030 offset |= (insn & 0x800) ? 0x80000 : 0;
4031 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4032 if (offset & 0x100000)
4033 offset |= ~0xfffff;
4034 stub_type = arm_stub_a8_veneer_b_cond;
4035 }
4036 else if (is_b || is_bl || is_blx)
4037 {
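/* Reconstruct the branch offset for encodings T1 (BL), T2 (BLX) and
   T4 (B) as SignExtend(S:I1:I2:imm10:imm11:'0'), where
   I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S); for BLX the result is
   forced to a multiple of four below.  */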
4038 int s = (insn & 0x4000000) != 0;
4039 int j1 = (insn & 0x2000) != 0;
4040 int j2 = (insn & 0x800) != 0;
4041 int i1 = !(j1 ^ s);
4042 int i2 = !(j2 ^ s);
4043
4044 offset = (insn & 0x7ff) << 1;
4045 offset |= (insn & 0x3ff0000) >> 4;
4046 offset |= i2 << 22;
4047 offset |= i1 << 23;
4048 offset |= s << 24;
4049 if (offset & 0x1000000)
4050 offset |= ~0xffffff;
4051
4052 if (is_blx)
4053 offset &= ~3u;
4054
4055 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4056 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4057 }
4058
4059 if (stub_type != arm_stub_none)
4060 {
4061 bfd_vma pc_for_insn = base_vma + i + 4;
4062
4063 /* The original instruction is a BL, but the target is
4064 an ARM instruction. If we were not making a stub,
4065 the BL would have been converted to a BLX. Use the
4066 BLX stub instead in that case. */
4067 if (htab->use_blx && force_target_arm
4068 && stub_type == arm_stub_a8_veneer_bl)
4069 {
4070 stub_type = arm_stub_a8_veneer_blx;
4071 is_blx = TRUE;
4072 is_bl = FALSE;
4073 }
4074 /* Conversely, if the original instruction was
4075 BLX but the target is Thumb mode, use the BL
4076 stub. */
4077 else if (force_target_thumb
4078 && stub_type == arm_stub_a8_veneer_blx)
4079 {
4080 stub_type = arm_stub_a8_veneer_bl;
4081 is_blx = FALSE;
4082 is_bl = TRUE;
4083 }
4084
4085 if (is_blx)
4086 pc_for_insn &= ~3u;
4087
4088 /* If we found a relocation, use the proper destination,
4089 not the offset in the (unrelocated) instruction.
4090 Note this is always done if we switched the stub type
4091 above. */
4092 if (found)
4093 offset = found->destination - pc_for_insn;
4094
4095 target = pc_for_insn + offset;
4096
4097 /* The BLX stub is ARM-mode code. Adjust the offset to
4098 take the different PC value (+8 instead of +4) into
4099 account. */
4100 if (stub_type == arm_stub_a8_veneer_blx)
4101 offset += 4;
4102
4103 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4104 {
4105 char *stub_name;
4106
4107 if (num_a8_fixes == a8_fix_table_size)
4108 {
4109 a8_fix_table_size *= 2;
4110 a8_fixes = bfd_realloc (a8_fixes,
4111 sizeof (struct a8_erratum_fix)
4112 * a8_fix_table_size);
4113 }
4114
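/* Room for two 32-bit values printed in hex (at most 8 digits
   each), a separating colon and a terminating NUL.  */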
4115 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4116 if (stub_name != NULL)
4117 sprintf (stub_name, "%x:%x", section->id, i);
4118
4119 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4120 a8_fixes[num_a8_fixes].section = section;
4121 a8_fixes[num_a8_fixes].offset = i;
4122 a8_fixes[num_a8_fixes].addend = offset;
4123 a8_fixes[num_a8_fixes].orig_insn = insn;
4124 a8_fixes[num_a8_fixes].stub_name = stub_name;
4125 a8_fixes[num_a8_fixes].stub_type = stub_type;
4126
4127 num_a8_fixes++;
4128 }
4129 }
4130 }
4131
4132 i += insn_32bit ? 4 : 2;
4133 last_was_32bit = insn_32bit;
4134 last_was_branch = is_32bit_branch;
4135 }
4136 }
4137
4138 if (elf_section_data (section)->this_hdr.contents == NULL)
4139 free (contents);
4140 }
4141
4142 *a8_fixes_p = a8_fixes;
4143 *num_a8_fixes_p = num_a8_fixes;
4144 *a8_fix_table_size_p = a8_fix_table_size;
4145
4146 return 0;
4147 }
4148
4149 /* Determine and set the size of the stub section for a final link.
4150
4151 The basic idea here is to examine all the relocations looking for
4152 PC-relative calls to a target that is unreachable with a "bl"
4153 instruction. */
4154
4155 bfd_boolean
4156 elf32_arm_size_stubs (bfd *output_bfd,
4157 bfd *stub_bfd,
4158 struct bfd_link_info *info,
4159 bfd_signed_vma group_size,
4160 asection * (*add_stub_section) (const char *, asection *),
4161 void (*layout_sections_again) (void))
4162 {
4163 bfd_size_type stub_group_size;
4164 bfd_boolean stubs_always_after_branch;
4165 bfd_boolean stub_changed = 0;
4166 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4167 struct a8_erratum_fix *a8_fixes = NULL;
4168 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4169 struct a8_erratum_reloc *a8_relocs = NULL;
4170 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4171
4172 if (htab->fix_cortex_a8)
4173 {
4174 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4175 * a8_fix_table_size);
4176 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4177 * a8_reloc_table_size);
4178 }
4179
4180 /* Propagate mach to stub bfd, because it may not have been
4181 finalized when we created stub_bfd. */
4182 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4183 bfd_get_mach (output_bfd));
4184
4185 /* Stash our params away. */
4186 htab->stub_bfd = stub_bfd;
4187 htab->add_stub_section = add_stub_section;
4188 htab->layout_sections_again = layout_sections_again;
4189 stubs_always_after_branch = group_size < 0;
4190
4191 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4192 as the first half of a 32-bit branch straddling two 4K pages. This is a
4193 crude way of enforcing that. */
4194 if (htab->fix_cortex_a8)
4195 stubs_always_after_branch = 1;
4196
4197 if (group_size < 0)
4198 stub_group_size = -group_size;
4199 else
4200 stub_group_size = group_size;
4201
4202 if (stub_group_size == 1)
4203 {
4204 /* Default values. */
4205 /* The Thumb branch range of +-4MB has to be used as the default
4206 maximum size (a given section can contain both ARM and Thumb
4207 code, so the worst case has to be taken into account).
4208
4209 This value is roughly 24K less than that (4194304 - 4170000 =
4210 24304 bytes), which leaves room for about 2025 12-byte stubs.
4211 If we exceed that, then we will fail to link. The user will
4212 have to relink with an explicit group size option. */
4213 stub_group_size = 4170000;
4214 }
4215
4216 group_sections (htab, stub_group_size, stubs_always_after_branch);
4217
4218 while (1)
4219 {
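/* Iterate: scan every input section for branches that need stubs,
   add the corresponding stub hash entries, re-size the stub sections
   and re-lay them out, and repeat until the set of stubs (and
   Cortex-A8 fixes) stops changing.  */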
4220 bfd *input_bfd;
4221 unsigned int bfd_indx;
4222 asection *stub_sec;
4223
4224 num_a8_fixes = 0;
4225
4226 for (input_bfd = info->input_bfds, bfd_indx = 0;
4227 input_bfd != NULL;
4228 input_bfd = input_bfd->link_next, bfd_indx++)
4229 {
4230 Elf_Internal_Shdr *symtab_hdr;
4231 asection *section;
4232 Elf_Internal_Sym *local_syms = NULL;
4233
4234 num_a8_relocs = 0;
4235
4236 /* We'll need the symbol table in a second. */
4237 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4238 if (symtab_hdr->sh_info == 0)
4239 continue;
4240
4241 /* Walk over each section attached to the input bfd. */
4242 for (section = input_bfd->sections;
4243 section != NULL;
4244 section = section->next)
4245 {
4246 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4247
4248 /* If there aren't any relocs, then there's nothing more
4249 to do. */
4250 if ((section->flags & SEC_RELOC) == 0
4251 || section->reloc_count == 0
4252 || (section->flags & SEC_CODE) == 0)
4253 continue;
4254
4255 /* If this section is a link-once section that will be
4256 discarded, then don't create any stubs. */
4257 if (section->output_section == NULL
4258 || section->output_section->owner != output_bfd)
4259 continue;
4260
4261 /* Get the relocs. */
4262 internal_relocs
4263 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4264 NULL, info->keep_memory);
4265 if (internal_relocs == NULL)
4266 goto error_ret_free_local;
4267
4268 /* Now examine each relocation. */
4269 irela = internal_relocs;
4270 irelaend = irela + section->reloc_count;
4271 for (; irela < irelaend; irela++)
4272 {
4273 unsigned int r_type, r_indx;
4274 enum elf32_arm_stub_type stub_type;
4275 struct elf32_arm_stub_hash_entry *stub_entry;
4276 asection *sym_sec;
4277 bfd_vma sym_value;
4278 bfd_vma destination;
4279 struct elf32_arm_link_hash_entry *hash;
4280 const char *sym_name;
4281 char *stub_name;
4282 const asection *id_sec;
4283 unsigned char st_type;
4284 bfd_boolean created_stub = FALSE;
4285
4286 r_type = ELF32_R_TYPE (irela->r_info);
4287 r_indx = ELF32_R_SYM (irela->r_info);
4288
4289 if (r_type >= (unsigned int) R_ARM_max)
4290 {
4291 bfd_set_error (bfd_error_bad_value);
4292 error_ret_free_internal:
4293 if (elf_section_data (section)->relocs == NULL)
4294 free (internal_relocs);
4295 goto error_ret_free_local;
4296 }
4297
4298 /* Only look for stubs on branch instructions. */
4299 if ((r_type != (unsigned int) R_ARM_CALL)
4300 && (r_type != (unsigned int) R_ARM_THM_CALL)
4301 && (r_type != (unsigned int) R_ARM_JUMP24)
4302 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4303 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4304 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4305 && (r_type != (unsigned int) R_ARM_PLT32))
4306 continue;
4307
4308 /* Now determine the call target, its name, value,
4309 section. */
4310 sym_sec = NULL;
4311 sym_value = 0;
4312 destination = 0;
4313 hash = NULL;
4314 sym_name = NULL;
4315 if (r_indx < symtab_hdr->sh_info)
4316 {
4317 /* It's a local symbol. */
4318 Elf_Internal_Sym *sym;
4319 Elf_Internal_Shdr *hdr;
4320
4321 if (local_syms == NULL)
4322 {
4323 local_syms
4324 = (Elf_Internal_Sym *) symtab_hdr->contents;
4325 if (local_syms == NULL)
4326 local_syms
4327 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4328 symtab_hdr->sh_info, 0,
4329 NULL, NULL, NULL);
4330 if (local_syms == NULL)
4331 goto error_ret_free_internal;
4332 }
4333
4334 sym = local_syms + r_indx;
4335 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4336 sym_sec = hdr->bfd_section;
4337 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4338 sym_value = sym->st_value;
4339 destination = (sym_value + irela->r_addend
4340 + sym_sec->output_offset
4341 + sym_sec->output_section->vma);
4342 st_type = ELF_ST_TYPE (sym->st_info);
4343 sym_name
4344 = bfd_elf_string_from_elf_section (input_bfd,
4345 symtab_hdr->sh_link,
4346 sym->st_name);
4347 }
4348 else
4349 {
4350 /* It's an external symbol. */
4351 int e_indx;
4352
4353 e_indx = r_indx - symtab_hdr->sh_info;
4354 hash = ((struct elf32_arm_link_hash_entry *)
4355 elf_sym_hashes (input_bfd)[e_indx]);
4356
4357 while (hash->root.root.type == bfd_link_hash_indirect
4358 || hash->root.root.type == bfd_link_hash_warning)
4359 hash = ((struct elf32_arm_link_hash_entry *)
4360 hash->root.root.u.i.link);
4361
4362 if (hash->root.root.type == bfd_link_hash_defined
4363 || hash->root.root.type == bfd_link_hash_defweak)
4364 {
4365 sym_sec = hash->root.root.u.def.section;
4366 sym_value = hash->root.root.u.def.value;
4367 if (sym_sec->output_section != NULL)
4368 destination = (sym_value + irela->r_addend
4369 + sym_sec->output_offset
4370 + sym_sec->output_section->vma);
4371 }
4372 else if ((hash->root.root.type == bfd_link_hash_undefined)
4373 || (hash->root.root.type == bfd_link_hash_undefweak))
4374 {
4375 /* For a shared library, use the PLT stub as
4376 the target address to decide whether a long
4377 branch stub is needed. Undefined symbols in
4378 absolute code cannot be handled, so skip them. */
4379 struct elf32_arm_link_hash_table *globals =
4380 elf32_arm_hash_table (info);
4381
4382 if (globals->splt != NULL && hash != NULL
4383 && hash->root.plt.offset != (bfd_vma) -1)
4384 {
4385 sym_sec = globals->splt;
4386 sym_value = hash->root.plt.offset;
4387 if (sym_sec->output_section != NULL)
4388 destination = (sym_value
4389 + sym_sec->output_offset
4390 + sym_sec->output_section->vma);
4391 }
4392 else
4393 continue;
4394 }
4395 else
4396 {
4397 bfd_set_error (bfd_error_bad_value);
4398 goto error_ret_free_internal;
4399 }
4400 st_type = ELF_ST_TYPE (hash->root.type);
4401 sym_name = hash->root.root.root.string;
4402 }
4403
4404 do
4405 {
4406 /* Determine what (if any) linker stub is needed. */
4407 stub_type = arm_type_of_stub (info, section, irela,
4408 st_type, hash,
4409 destination, sym_sec,
4410 input_bfd, sym_name);
4411 if (stub_type == arm_stub_none)
4412 break;
4413
4414 /* Support for grouping stub sections. */
4415 id_sec = htab->stub_group[section->id].link_sec;
4416
4417 /* Get the name of this stub. */
4418 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4419 irela);
4420 if (!stub_name)
4421 goto error_ret_free_internal;
4422
4423 /* We've either created a stub for this reloc already,
4424 or we are about to. */
4425 created_stub = TRUE;
4426
4427 stub_entry = arm_stub_hash_lookup
4428 (&htab->stub_hash_table, stub_name,
4429 FALSE, FALSE);
4430 if (stub_entry != NULL)
4431 {
4432 /* The proper stub has already been created. */
4433 free (stub_name);
4434 break;
4435 }
4436
4437 stub_entry = elf32_arm_add_stub (stub_name, section,
4438 htab);
4439 if (stub_entry == NULL)
4440 {
4441 free (stub_name);
4442 goto error_ret_free_internal;
4443 }
4444
4445 stub_entry->target_value = sym_value;
4446 stub_entry->target_section = sym_sec;
4447 stub_entry->stub_type = stub_type;
4448 stub_entry->h = hash;
4449 stub_entry->st_type = st_type;
4450
4451 if (sym_name == NULL)
4452 sym_name = "unnamed";
4453 stub_entry->output_name
4454 = bfd_alloc (htab->stub_bfd,
4455 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4456 + strlen (sym_name));
4457 if (stub_entry->output_name == NULL)
4458 {
4459 free (stub_name);
4460 goto error_ret_free_internal;
4461 }
4462
4463 /* For historical reasons, use the existing names for
4464 ARM-to-Thumb and Thumb-to-ARM stubs. */
4465 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4466 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4467 && st_type != STT_ARM_TFUNC)
4468 sprintf (stub_entry->output_name,
4469 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4470 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4471 || (r_type == (unsigned int) R_ARM_JUMP24))
4472 && st_type == STT_ARM_TFUNC)
4473 sprintf (stub_entry->output_name,
4474 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4475 else
4476 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4477 sym_name);
4478
4479 stub_changed = TRUE;
4480 }
4481 while (0);
4482
4483 /* Look for relocations which might trigger Cortex-A8
4484 erratum. */
4485 if (htab->fix_cortex_a8
4486 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4487 || r_type == (unsigned int) R_ARM_THM_JUMP19
4488 || r_type == (unsigned int) R_ARM_THM_CALL
4489 || r_type == (unsigned int) R_ARM_THM_XPC22))
4490 {
4491 bfd_vma from = section->output_section->vma
4492 + section->output_offset
4493 + irela->r_offset;
4494
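/* A 32-bit Thumb branch whose first halfword lies at offset 0xffe
   within a 4K page straddles the page boundary, which is the
   situation the Cortex-A8 erratum workaround is concerned with.  */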
4495 if ((from & 0xfff) == 0xffe)
4496 {
4497 /* Found a candidate. Note we haven't checked that the
4498 destination is within 4K here: if we did (and did not
4499 create an entry in a8_relocs), then the scanning pass
4500 later on could not tell that the branch should have
4501 been relocated. */
4502 if (num_a8_relocs == a8_reloc_table_size)
4503 {
4504 a8_reloc_table_size *= 2;
4505 a8_relocs = bfd_realloc (a8_relocs,
4506 sizeof (struct a8_erratum_reloc)
4507 * a8_reloc_table_size);
4508 }
4509
4510 a8_relocs[num_a8_relocs].from = from;
4511 a8_relocs[num_a8_relocs].destination = destination;
4512 a8_relocs[num_a8_relocs].r_type = r_type;
4513 a8_relocs[num_a8_relocs].st_type = st_type;
4514 a8_relocs[num_a8_relocs].sym_name = sym_name;
4515 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4516
4517 num_a8_relocs++;
4518 }
4519 }
4520 }
4521
4522 /* We're done with the internal relocs, free them. */
4523 if (elf_section_data (section)->relocs == NULL)
4524 free (internal_relocs);
4525 }
4526
4527 if (htab->fix_cortex_a8)
4528 {
4529 /* Sort relocs which might apply to Cortex-A8 erratum. */
4530 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4531 &a8_reloc_compare);
4532
4533 /* Scan for branches which might trigger Cortex-A8 erratum. */
4534 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4535 &num_a8_fixes, &a8_fix_table_size,
4536 a8_relocs, num_a8_relocs) != 0)
4537 goto error_ret_free_local;
4538 }
4539 }
4540
4541 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4542 stub_changed = TRUE;
4543
4544 if (!stub_changed)
4545 break;
4546
4547 /* OK, we've added some stubs. Find out the new size of the
4548 stub sections. */
4549 for (stub_sec = htab->stub_bfd->sections;
4550 stub_sec != NULL;
4551 stub_sec = stub_sec->next)
4552 {
4553 /* Ignore non-stub sections. */
4554 if (!strstr (stub_sec->name, STUB_SUFFIX))
4555 continue;
4556
4557 stub_sec->size = 0;
4558 }
4559
4560 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4561
4562 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4563 if (htab->fix_cortex_a8)
4564 for (i = 0; i < num_a8_fixes; i++)
4565 {
4566 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4567 a8_fixes[i].section, htab);
4568
4569 if (stub_sec == NULL)
4570 goto error_ret_free_local;
4571
4572 stub_sec->size
4573 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4574 NULL);
4575 }
4576
4577
4578 /* Ask the linker to do its stuff. */
4579 (*htab->layout_sections_again) ();
4580 stub_changed = FALSE;
4581 prev_num_a8_fixes = num_a8_fixes;
4582 }
4583
4584 /* Add stubs for Cortex-A8 erratum fixes now. */
4585 if (htab->fix_cortex_a8)
4586 {
4587 for (i = 0; i < num_a8_fixes; i++)
4588 {
4589 struct elf32_arm_stub_hash_entry *stub_entry;
4590 char *stub_name = a8_fixes[i].stub_name;
4591 asection *section = a8_fixes[i].section;
4592 unsigned int section_id = a8_fixes[i].section->id;
4593 asection *link_sec = htab->stub_group[section_id].link_sec;
4594 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4595 const insn_sequence *template;
4596 int template_size, size = 0;
4597
4598 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4599 TRUE, FALSE);
4600 if (stub_entry == NULL)
4601 {
4602 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4603 section->owner,
4604 stub_name);
4605 return FALSE;
4606 }
4607
4608 stub_entry->stub_sec = stub_sec;
4609 stub_entry->stub_offset = 0;
4610 stub_entry->id_sec = link_sec;
4611 stub_entry->stub_type = a8_fixes[i].stub_type;
4612 stub_entry->target_section = a8_fixes[i].section;
4613 stub_entry->target_value = a8_fixes[i].offset;
4614 stub_entry->target_addend = a8_fixes[i].addend;
4615 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4616 stub_entry->st_type = STT_ARM_TFUNC;
4617
4618 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4619 &template_size);
4620
4621 stub_entry->stub_size = size;
4622 stub_entry->stub_template = template;
4623 stub_entry->stub_template_size = template_size;
4624 }
4625
4626 /* Stash the Cortex-A8 erratum fix array for use later in
4627 elf32_arm_write_section(). */
4628 htab->a8_erratum_fixes = a8_fixes;
4629 htab->num_a8_erratum_fixes = num_a8_fixes;
4630 }
4631 else
4632 {
4633 htab->a8_erratum_fixes = NULL;
4634 htab->num_a8_erratum_fixes = 0;
4635 }
4636 return TRUE;
4637
4638 error_ret_free_local:
4639 return FALSE;
4640 }
4641
4642 /* Build all the stubs associated with the current output file. The
4643 stubs are kept in a hash table attached to the main linker hash
4644 table. We also set up the .plt entries for statically linked PIC
4645 functions here. This function is called via arm_elf_finish in the
4646 linker. */
4647
4648 bfd_boolean
4649 elf32_arm_build_stubs (struct bfd_link_info *info)
4650 {
4651 asection *stub_sec;
4652 struct bfd_hash_table *table;
4653 struct elf32_arm_link_hash_table *htab;
4654
4655 htab = elf32_arm_hash_table (info);
4656
4657 for (stub_sec = htab->stub_bfd->sections;
4658 stub_sec != NULL;
4659 stub_sec = stub_sec->next)
4660 {
4661 bfd_size_type size;
4662
4663 /* Ignore non-stub sections. */
4664 if (!strstr (stub_sec->name, STUB_SUFFIX))
4665 continue;
4666
4667 /* Allocate memory to hold the linker stubs. */
4668 size = stub_sec->size;
4669 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4670 if (stub_sec->contents == NULL && size != 0)
4671 return FALSE;
4672 stub_sec->size = 0;
4673 }
4674
4675 /* Build the stubs as directed by the stub hash table. */
4676 table = &htab->stub_hash_table;
4677 bfd_hash_traverse (table, arm_build_one_stub, info);
4678
4679 return TRUE;
4680 }
4681
4682 /* Locate the Thumb encoded calling stub for NAME. */
4683
4684 static struct elf_link_hash_entry *
4685 find_thumb_glue (struct bfd_link_info *link_info,
4686 const char *name,
4687 char **error_message)
4688 {
4689 char *tmp_name;
4690 struct elf_link_hash_entry *hash;
4691 struct elf32_arm_link_hash_table *hash_table;
4692
4693 /* We need a pointer to the armelf specific hash table. */
4694 hash_table = elf32_arm_hash_table (link_info);
4695
4696 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4697 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4698
4699 BFD_ASSERT (tmp_name);
4700
4701 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4702
4703 hash = elf_link_hash_lookup
4704 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4705
4706 if (hash == NULL
4707 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4708 tmp_name, name) == -1)
4709 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4710
4711 free (tmp_name);
4712
4713 return hash;
4714 }
4715
4716 /* Locate the ARM encoded calling stub for NAME. */
4717
4718 static struct elf_link_hash_entry *
4719 find_arm_glue (struct bfd_link_info *link_info,
4720 const char *name,
4721 char **error_message)
4722 {
4723 char *tmp_name;
4724 struct elf_link_hash_entry *myh;
4725 struct elf32_arm_link_hash_table *hash_table;
4726
4727 /* We need a pointer to the elfarm specific hash table. */
4728 hash_table = elf32_arm_hash_table (link_info);
4729
4730 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4731 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4732
4733 BFD_ASSERT (tmp_name);
4734
4735 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4736
4737 myh = elf_link_hash_lookup
4738 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4739
4740 if (myh == NULL
4741 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4742 tmp_name, name) == -1)
4743 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4744
4745 free (tmp_name);
4746
4747 return myh;
4748 }
4749
4750 /* ARM->Thumb glue (static images):
4751
4752 .arm
4753 __func_from_arm:
4754 ldr r12, __func_addr
4755 bx r12
4756 __func_addr:
4757 .word func @ behave as if you saw an ARM_32 reloc.
4758
4759 (v5t static images)
4760 .arm
4761 __func_from_arm:
4762 ldr pc, __func_addr
4763 __func_addr:
4764 .word func @ behave as if you saw an ARM_32 reloc.
4765
4766 (relocatable images)
4767 .arm
4768 __func_from_arm:
4769 ldr r12, __func_offset
4770 add r12, r12, pc
4771 bx r12
4772 __func_offset:
4773 .word func - . */
4774
4775 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4776 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4777 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4778 static const insn32 a2t3_func_addr_insn = 0x00000001;
4779
4780 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4781 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4782 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4783
4784 #define ARM2THUMB_PIC_GLUE_SIZE 16
4785 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4786 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4787 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4788
4789 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4790
4791 .thumb .thumb
4792 .align 2 .align 2
4793 __func_from_thumb: __func_from_thumb:
4794 bx pc push {r6, lr}
4795 nop ldr r6, __func_addr
4796 .arm mov lr, pc
4797 b func bx r6
4798 .arm
4799 ;; back_to_thumb
4800 ldmia r13! {r6, lr}
4801 bx lr
4802 __func_addr:
4803 .word func */
4804
4805 #define THUMB2ARM_GLUE_SIZE 8
4806 static const insn16 t2a1_bx_pc_insn = 0x4778;
4807 static const insn16 t2a2_noop_insn = 0x46c0;
4808 static const insn32 t2a3_b_insn = 0xea000000;
4809
4810 #define VFP11_ERRATUM_VENEER_SIZE 8
4811
4812 #define ARM_BX_VENEER_SIZE 12
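/* The veneer body, with the register fields left as zero here:
   tst rN, #1 ; moveq pc, rN ; bx rN.  */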
4813 static const insn32 armbx1_tst_insn = 0xe3100001;
4814 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4815 static const insn32 armbx3_bx_insn = 0xe12fff10;
4816
4817 #ifndef ELFARM_NABI_C_INCLUDED
4818 static void
4819 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4820 {
4821 asection * s;
4822 bfd_byte * contents;
4823
4824 if (size == 0)
4825 {
4826 /* Do not include empty glue sections in the output. */
4827 if (abfd != NULL)
4828 {
4829 s = bfd_get_section_by_name (abfd, name);
4830 if (s != NULL)
4831 s->flags |= SEC_EXCLUDE;
4832 }
4833 return;
4834 }
4835
4836 BFD_ASSERT (abfd != NULL);
4837
4838 s = bfd_get_section_by_name (abfd, name);
4839 BFD_ASSERT (s != NULL);
4840
4841 contents = bfd_alloc (abfd, size);
4842
4843 BFD_ASSERT (s->size == size);
4844 s->contents = contents;
4845 }
4846
4847 bfd_boolean
4848 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4849 {
4850 struct elf32_arm_link_hash_table * globals;
4851
4852 globals = elf32_arm_hash_table (info);
4853 BFD_ASSERT (globals != NULL);
4854
4855 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4856 globals->arm_glue_size,
4857 ARM2THUMB_GLUE_SECTION_NAME);
4858
4859 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4860 globals->thumb_glue_size,
4861 THUMB2ARM_GLUE_SECTION_NAME);
4862
4863 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4864 globals->vfp11_erratum_glue_size,
4865 VFP11_ERRATUM_VENEER_SECTION_NAME);
4866
4867 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4868 globals->bx_glue_size,
4869 ARM_BX_GLUE_SECTION_NAME);
4870
4871 return TRUE;
4872 }
4873
4874 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4875 Returns the symbol identifying the stub. */
4876
4877 static struct elf_link_hash_entry *
4878 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4879 struct elf_link_hash_entry * h)
4880 {
4881 const char * name = h->root.root.string;
4882 asection * s;
4883 char * tmp_name;
4884 struct elf_link_hash_entry * myh;
4885 struct bfd_link_hash_entry * bh;
4886 struct elf32_arm_link_hash_table * globals;
4887 bfd_vma val;
4888 bfd_size_type size;
4889
4890 globals = elf32_arm_hash_table (link_info);
4891
4892 BFD_ASSERT (globals != NULL);
4893 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4894
4895 s = bfd_get_section_by_name
4896 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4897
4898 BFD_ASSERT (s != NULL);
4899
4900 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4901
4902 BFD_ASSERT (tmp_name);
4903
4904 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4905
4906 myh = elf_link_hash_lookup
4907 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4908
4909 if (myh != NULL)
4910 {
4911 /* We've already seen this guy. */
4912 free (tmp_name);
4913 return myh;
4914 }
4915
4916 /* The only trick here is using hash_table->arm_glue_size as the value.
4917 Even though the section isn't allocated yet, this is where we will be
4918 putting it. The +1 on the value marks that the stub has not been
4919 output yet - not that it is a Thumb function. */
4920 bh = NULL;
4921 val = globals->arm_glue_size + 1;
4922 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4923 tmp_name, BSF_GLOBAL, s, val,
4924 NULL, TRUE, FALSE, &bh);
4925
4926 myh = (struct elf_link_hash_entry *) bh;
4927 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4928 myh->forced_local = 1;
4929
4930 free (tmp_name);
4931
4932 if (link_info->shared || globals->root.is_relocatable_executable
4933 || globals->pic_veneer)
4934 size = ARM2THUMB_PIC_GLUE_SIZE;
4935 else if (globals->use_blx)
4936 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4937 else
4938 size = ARM2THUMB_STATIC_GLUE_SIZE;
4939
4940 s->size += size;
4941 globals->arm_glue_size += size;
4942
4943 return myh;
4944 }
4945
4946 /* Allocate space for ARMv4 BX veneers. */
4947
4948 static void
4949 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4950 {
4951 asection * s;
4952 struct elf32_arm_link_hash_table *globals;
4953 char *tmp_name;
4954 struct elf_link_hash_entry *myh;
4955 struct bfd_link_hash_entry *bh;
4956 bfd_vma val;
4957
4958 /* BX PC does not need a veneer. */
4959 if (reg == 15)
4960 return;
4961
4962 globals = elf32_arm_hash_table (link_info);
4963
4964 BFD_ASSERT (globals != NULL);
4965 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4966
4967 /* Check if this veneer has already been allocated. */
4968 if (globals->bx_glue_offset[reg])
4969 return;
4970
4971 s = bfd_get_section_by_name
4972 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4973
4974 BFD_ASSERT (s != NULL);
4975
4976 /* Add symbol for veneer. */
4977 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4978
4979 BFD_ASSERT (tmp_name);
4980
4981 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
4982
4983 myh = elf_link_hash_lookup
4984 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
4985
4986 BFD_ASSERT (myh == NULL);
4987
4988 bh = NULL;
4989 val = globals->bx_glue_size;
4990 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4991 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
4992 NULL, TRUE, FALSE, &bh);
4993
4994 myh = (struct elf_link_hash_entry *) bh;
4995 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4996 myh->forced_local = 1;
4997
4998 s->size += ARM_BX_VENEER_SIZE;
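/* OR in a flag bit so that the recorded entry is non-zero even for
   the veneer at offset zero; the check above relies on a zero entry
   meaning that no veneer has been allocated for this register yet.  */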
4999 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5000 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5001 }
5002
5003
5004 /* Add an entry to the code/data map for section SEC. */
5005
5006 static void
5007 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5008 {
5009 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5010 unsigned int newidx;
5011
5012 if (sec_data->map == NULL)
5013 {
5014 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5015 sec_data->mapcount = 0;
5016 sec_data->mapsize = 1;
5017 }
5018
5019 newidx = sec_data->mapcount++;
5020
5021 if (sec_data->mapcount > sec_data->mapsize)
5022 {
5023 sec_data->mapsize *= 2;
5024 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5025 * sizeof (elf32_arm_section_map));
5026 }
5027
5028 if (sec_data->map)
5029 {
5030 sec_data->map[newidx].vma = vma;
5031 sec_data->map[newidx].type = type;
5032 }
5033 }
5034
5035
5036 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5037 veneers are handled for now. */
5038
5039 static bfd_vma
5040 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5041 elf32_vfp11_erratum_list *branch,
5042 bfd *branch_bfd,
5043 asection *branch_sec,
5044 unsigned int offset)
5045 {
5046 asection *s;
5047 struct elf32_arm_link_hash_table *hash_table;
5048 char *tmp_name;
5049 struct elf_link_hash_entry *myh;
5050 struct bfd_link_hash_entry *bh;
5051 bfd_vma val;
5052 struct _arm_elf_section_data *sec_data;
5053 int errcount;
5054 elf32_vfp11_erratum_list *newerr;
5055
5056 hash_table = elf32_arm_hash_table (link_info);
5057
5058 BFD_ASSERT (hash_table != NULL);
5059 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5060
5061 s = bfd_get_section_by_name
5062 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5063
5064 BFD_ASSERT (s != NULL);
5065
5066 sec_data = elf32_arm_section_data (s);
5067
5068 tmp_name = bfd_malloc ((bfd_size_type) strlen
5069 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5070
5071 BFD_ASSERT (tmp_name);
5072
5073 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5074 hash_table->num_vfp11_fixes);
5075
5076 myh = elf_link_hash_lookup
5077 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5078
5079 BFD_ASSERT (myh == NULL);
5080
5081 bh = NULL;
5082 val = hash_table->vfp11_erratum_glue_size;
5083 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5084 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5085 NULL, TRUE, FALSE, &bh);
5086
5087 myh = (struct elf_link_hash_entry *) bh;
5088 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5089 myh->forced_local = 1;
5090
5091 /* Link veneer back to calling location. */
5092 errcount = ++(sec_data->erratumcount);
5093 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5094
5095 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5096 newerr->vma = -1;
5097 newerr->u.v.branch = branch;
5098 newerr->u.v.id = hash_table->num_vfp11_fixes;
5099 branch->u.b.veneer = newerr;
5100
5101 newerr->next = sec_data->erratumlist;
5102 sec_data->erratumlist = newerr;
5103
5104 /* A symbol for the return from the veneer. */
5105 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5106 hash_table->num_vfp11_fixes);
5107
5108 myh = elf_link_hash_lookup
5109 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5110
5111 if (myh != NULL)
5112 abort ();
5113
5114 bh = NULL;
5115 val = offset + 4;
5116 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5117 branch_sec, val, NULL, TRUE, FALSE, &bh);
5118
5119 myh = (struct elf_link_hash_entry *) bh;
5120 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5121 myh->forced_local = 1;
5122
5123 free (tmp_name);
5124
5125 /* Generate a mapping symbol for the veneer section, and explicitly add an
5126 entry for that symbol to the code/data map for the section. */
5127 if (hash_table->vfp11_erratum_glue_size == 0)
5128 {
5129 bh = NULL;
5130 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5131 ever requires this erratum fix. */
5132 _bfd_generic_link_add_one_symbol (link_info,
5133 hash_table->bfd_of_glue_owner, "$a",
5134 BSF_LOCAL, s, 0, NULL,
5135 TRUE, FALSE, &bh);
5136
5137 myh = (struct elf_link_hash_entry *) bh;
5138 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5139 myh->forced_local = 1;
5140
5141 /* The elf32_arm_init_maps function only cares about symbols from input
5142 BFDs. We must make a note of this generated mapping symbol
5143 ourselves so that code byteswapping works properly in
5144 elf32_arm_write_section. */
5145 elf32_arm_section_map_add (s, 'a', 0);
5146 }
5147
5148 s->size += VFP11_ERRATUM_VENEER_SIZE;
5149 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5150 hash_table->num_vfp11_fixes++;
5151
5152 /* The offset of the veneer. */
5153 return val;
5154 }
5155
5156 #define ARM_GLUE_SECTION_FLAGS \
5157 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5158 | SEC_READONLY | SEC_LINKER_CREATED)
5159
5160 /* Create a fake section for use by the ARM backend of the linker. */
5161
5162 static bfd_boolean
5163 arm_make_glue_section (bfd * abfd, const char * name)
5164 {
5165 asection * sec;
5166
5167 sec = bfd_get_section_by_name (abfd, name);
5168 if (sec != NULL)
5169 /* Already made. */
5170 return TRUE;
5171
5172 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5173
5174 if (sec == NULL
5175 || !bfd_set_section_alignment (abfd, sec, 2))
5176 return FALSE;
5177
5178 /* Set the gc mark to prevent the section from being removed by garbage
5179 collection, despite the fact that no relocs refer to this section. */
5180 sec->gc_mark = 1;
5181
5182 return TRUE;
5183 }
5184
5185 /* Add the glue sections to ABFD. This function is called from the
5186 linker scripts in ld/emultempl/{armelf}.em. */
5187
5188 bfd_boolean
5189 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5190 struct bfd_link_info *info)
5191 {
5192 /* If we are only performing a partial
5193 link do not bother adding the glue. */
5194 if (info->relocatable)
5195 return TRUE;
5196
5197 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5198 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5199 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5200 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5201 }
5202
5203 /* Select a BFD to be used to hold the sections used by the glue code.
5204 This function is called from the linker scripts in ld/emultempl/
5205 {armelf/pe}.em. */
5206
5207 bfd_boolean
5208 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5209 {
5210 struct elf32_arm_link_hash_table *globals;
5211
5212 /* If we are only performing a partial link
5213 do not bother getting a bfd to hold the glue. */
5214 if (info->relocatable)
5215 return TRUE;
5216
5217 /* Make sure we don't attach the glue sections to a dynamic object. */
5218 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5219
5220 globals = elf32_arm_hash_table (info);
5221
5222 BFD_ASSERT (globals != NULL);
5223
5224 if (globals->bfd_of_glue_owner != NULL)
5225 return TRUE;
5226
5227 /* Save the bfd for later use. */
5228 globals->bfd_of_glue_owner = abfd;
5229
5230 return TRUE;
5231 }
5232
5233 static void
5234 check_use_blx (struct elf32_arm_link_hash_table *globals)
5235 {
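/* Tag_CPU_arch values above 2 (ARMv4T) denote ARMv5T or later,
   which provide the BLX instruction.  */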
5236 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5237 Tag_CPU_arch) > 2)
5238 globals->use_blx = 1;
5239 }
5240
5241 bfd_boolean
5242 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5243 struct bfd_link_info *link_info)
5244 {
5245 Elf_Internal_Shdr *symtab_hdr;
5246 Elf_Internal_Rela *internal_relocs = NULL;
5247 Elf_Internal_Rela *irel, *irelend;
5248 bfd_byte *contents = NULL;
5249
5250 asection *sec;
5251 struct elf32_arm_link_hash_table *globals;
5252
5253 /* If we are only performing a partial link do not bother
5254 to construct any glue. */
5255 if (link_info->relocatable)
5256 return TRUE;
5257
5258 /* Here we have a bfd that is to be included in the link. We have a
5259 hook to do reloc rummaging, before section sizes are nailed down. */
5260 globals = elf32_arm_hash_table (link_info);
5261
5262 BFD_ASSERT (globals != NULL);
5263
5264 check_use_blx (globals);
5265
5266 if (globals->byteswap_code && !bfd_big_endian (abfd))
5267 {
5268 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5269 abfd);
5270 return FALSE;
5271 }
5272
5273 /* PR 5398: If we have not decided to include any loadable sections in
5274 the output then we will not have a glue owner bfd. This is OK, it
5275 just means that there is nothing else for us to do here. */
5276 if (globals->bfd_of_glue_owner == NULL)
5277 return TRUE;
5278
5279 /* Rummage around all the relocs and map the glue vectors. */
5280 sec = abfd->sections;
5281
5282 if (sec == NULL)
5283 return TRUE;
5284
5285 for (; sec != NULL; sec = sec->next)
5286 {
5287 if (sec->reloc_count == 0)
5288 continue;
5289
5290 if ((sec->flags & SEC_EXCLUDE) != 0)
5291 continue;
5292
5293 symtab_hdr = & elf_symtab_hdr (abfd);
5294
5295 /* Load the relocs. */
5296 internal_relocs
5297 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5298
5299 if (internal_relocs == NULL)
5300 goto error_return;
5301
5302 irelend = internal_relocs + sec->reloc_count;
5303 for (irel = internal_relocs; irel < irelend; irel++)
5304 {
5305 long r_type;
5306 unsigned long r_index;
5307
5308 struct elf_link_hash_entry *h;
5309
5310 r_type = ELF32_R_TYPE (irel->r_info);
5311 r_index = ELF32_R_SYM (irel->r_info);
5312
5313 /* These are the only relocation types we care about. */
5314 if ( r_type != R_ARM_PC24
5315 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5316 continue;
5317
5318 /* Get the section contents if we haven't done so already. */
5319 if (contents == NULL)
5320 {
5321 /* Get cached copy if it exists. */
5322 if (elf_section_data (sec)->this_hdr.contents != NULL)
5323 contents = elf_section_data (sec)->this_hdr.contents;
5324 else
5325 {
5326 /* Go get them off disk. */
5327 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5328 goto error_return;
5329 }
5330 }
5331
5332 if (r_type == R_ARM_V4BX)
5333 {
5334 int reg;
5335
5336 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5337 record_arm_bx_glue (link_info, reg);
5338 continue;
5339 }
5340
5341 /* If the relocation is not against a symbol it cannot concern us. */
5342 h = NULL;
5343
5344 /* We don't care about local symbols. */
5345 if (r_index < symtab_hdr->sh_info)
5346 continue;
5347
5348 /* This is an external symbol. */
5349 r_index -= symtab_hdr->sh_info;
5350 h = (struct elf_link_hash_entry *)
5351 elf_sym_hashes (abfd)[r_index];
5352
5353 /* If the relocation is against a static symbol it must be within
5354 the current section and so cannot be a cross ARM/Thumb relocation. */
5355 if (h == NULL)
5356 continue;
5357
5358 /* If the call will go through a PLT entry then we do not need
5359 glue. */
5360 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5361 continue;
5362
5363 switch (r_type)
5364 {
5365 case R_ARM_PC24:
5366 /* This one is a call from ARM code. We need to look up
5367 the target of the call. If it is a Thumb target, we
5368 insert glue. */
5369 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5370 record_arm_to_thumb_glue (link_info, h);
5371 break;
5372
5373 default:
5374 abort ();
5375 }
5376 }
5377
5378 if (contents != NULL
5379 && elf_section_data (sec)->this_hdr.contents != contents)
5380 free (contents);
5381 contents = NULL;
5382
5383 if (internal_relocs != NULL
5384 && elf_section_data (sec)->relocs != internal_relocs)
5385 free (internal_relocs);
5386 internal_relocs = NULL;
5387 }
5388
5389 return TRUE;
5390
5391 error_return:
5392 if (contents != NULL
5393 && elf_section_data (sec)->this_hdr.contents != contents)
5394 free (contents);
5395 if (internal_relocs != NULL
5396 && elf_section_data (sec)->relocs != internal_relocs)
5397 free (internal_relocs);
5398
5399 return FALSE;
5400 }
5401 #endif
5402
5403
5404 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5405
5406 void
5407 bfd_elf32_arm_init_maps (bfd *abfd)
5408 {
5409 Elf_Internal_Sym *isymbuf;
5410 Elf_Internal_Shdr *hdr;
5411 unsigned int i, localsyms;
5412
5413 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5414 if (! is_arm_elf (abfd))
5415 return;
5416
5417 if ((abfd->flags & DYNAMIC) != 0)
5418 return;
5419
5420 hdr = & elf_symtab_hdr (abfd);
5421 localsyms = hdr->sh_info;
5422
5423 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5424 should contain the number of local symbols, which should come before any
5425 global symbols. Mapping symbols are always local. */
5426 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5427 NULL);
5428
5429 /* No internal symbols read? Skip this BFD. */
5430 if (isymbuf == NULL)
5431 return;
5432
5433 for (i = 0; i < localsyms; i++)
5434 {
5435 Elf_Internal_Sym *isym = &isymbuf[i];
5436 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5437 const char *name;
5438
5439 if (sec != NULL
5440 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5441 {
5442 name = bfd_elf_string_from_elf_section (abfd,
5443 hdr->sh_link, isym->st_name);
5444
5445 if (bfd_is_arm_special_symbol_name (name,
5446 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5447 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5448 }
5449 }
5450 }
5451
5452
5453 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5454 say what they wanted. */
5455
5456 void
5457 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5458 {
5459 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5460 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5461
5462 if (globals->fix_cortex_a8 == -1)
5463 {
5464 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5465 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5466 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5467 || out_attr[Tag_CPU_arch_profile].i == 0))
5468 globals->fix_cortex_a8 = 1;
5469 else
5470 globals->fix_cortex_a8 = 0;
5471 }
5472 }
5473
5474
5475 void
5476 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5477 {
5478 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5479 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5480
5481 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5482 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5483 {
5484 switch (globals->vfp11_fix)
5485 {
5486 case BFD_ARM_VFP11_FIX_DEFAULT:
5487 case BFD_ARM_VFP11_FIX_NONE:
5488 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5489 break;
5490
5491 default:
5492 /* Give a warning, but do as the user requests anyway. */
5493 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5494 "workaround is not necessary for target architecture"), obfd);
5495 }
5496 }
5497 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5498 /* For earlier architectures, we might need the workaround, but do not
5499 enable it by default. If the user is running on broken hardware, they
5500 must enable the erratum fix explicitly. */
5501 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5502 }
5503
5504
5505 enum bfd_arm_vfp11_pipe
5506 {
5507 VFP11_FMAC,
5508 VFP11_LS,
5509 VFP11_DS,
5510 VFP11_BAD
5511 };
5512
5513 /* Return a VFP register number. This is encoded as RX:X for single-precision
5514 registers, or X:RX for double-precision registers, where RX is the group of
5515 four bits in the instruction encoding and X is the single extension bit.
5516 RX and X fields are specified using their lowest (starting) bit. The return
5517 value is:
5518
5519 0...31: single-precision registers s0...s31
5520 32...63: double-precision registers d0...d31.
5521
5522 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5523 encounter VFP3 instructions, so we allow the full range for DP registers. */
5524
5525 static unsigned int
5526 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5527 unsigned int x)
5528 {
5529 if (is_double)
5530 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5531 else
5532 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5533 }
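
/* For illustration: with the Fd operand (RX field at bit 12, X bit at bit 22),
   an instruction whose bits [15:12] are 0b0011 with bit 22 set decodes to
   (3 << 1) | 1 == 7, i.e. s7 in single precision.  A double-precision Fm
   operand (RX at bit 0, X at bit 5) with bits [3:0] == 0b0101 and bit 5 clear
   gives (5 | (0 << 4)) + 32 == 37, i.e. d5.  The values are examples only. */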
5534
5535 /* Set bits in *WMASK according to a register number REG as encoded by
5536 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5537
5538 static void
5539 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5540 {
5541 if (reg < 32)
5542 *wmask |= 1 << reg;
5543 else if (reg < 48)
5544 *wmask |= 3 << ((reg - 32) * 2);
5545 }
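
/* For example, s7 (REG == 7) sets bit 7 of the mask, while d5 (REG == 37 as
   returned by bfd_arm_vfp11_regno) sets 3 << ((37 - 32) * 2), i.e. bits 10
   and 11, the two single-precision views of that DP register. */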
5546
5547 /* Return TRUE if WMASK overwrites anything in REGS. */
5548
5549 static bfd_boolean
5550 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5551 {
5552 int i;
5553
5554 for (i = 0; i < numregs; i++)
5555 {
5556 unsigned int reg = regs[i];
5557
5558 if (reg < 32 && (wmask & (1 << reg)) != 0)
5559 return TRUE;
5560
5561 reg -= 32;
5562
5563 if (reg >= 16)
5564 continue;
5565
5566 if ((wmask & (3 << (reg * 2))) != 0)
5567 return TRUE;
5568 }
5569
5570 return FALSE;
5571 }
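
/* Continuing the example above: if the current instruction's write mask has
   bits 10 and 11 set (it writes d5) and an earlier FMAC-pipeline instruction
   listed d5 (37) among its inputs in REGS, this returns TRUE and a veneer
   will be considered for that earlier instruction. */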
5572
5573 /* In this function, we're interested in two things: finding input registers
5574 for VFP data-processing instructions, and finding the set of registers which
5575 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5576 hold the written set, so FLDM etc. are easy to deal with (we're only
5577 interested in 32 SP registers or 16 DP registers, due to the VFP version
5578 implemented by the chip in question). DP registers are marked by setting
5579 both of the corresponding SP registers in the write mask. */
5580
5581 static enum bfd_arm_vfp11_pipe
5582 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5583 int *numregs)
5584 {
5585 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5586 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5587
5588 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5589 {
5590 unsigned int pqrs;
5591 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5592 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5593
5594 pqrs = ((insn & 0x00800000) >> 20)
5595 | ((insn & 0x00300000) >> 19)
5596 | ((insn & 0x00000040) >> 6);
5597
5598 switch (pqrs)
5599 {
5600 case 0: /* fmac[sd]. */
5601 case 1: /* fnmac[sd]. */
5602 case 2: /* fmsc[sd]. */
5603 case 3: /* fnmsc[sd]. */
5604 pipe = VFP11_FMAC;
5605 bfd_arm_vfp11_write_mask (destmask, fd);
5606 regs[0] = fd;
5607 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5608 regs[2] = fm;
5609 *numregs = 3;
5610 break;
5611
5612 case 4: /* fmul[sd]. */
5613 case 5: /* fnmul[sd]. */
5614 case 6: /* fadd[sd]. */
5615 case 7: /* fsub[sd]. */
5616 pipe = VFP11_FMAC;
5617 goto vfp_binop;
5618
5619 case 8: /* fdiv[sd]. */
5620 pipe = VFP11_DS;
5621 vfp_binop:
5622 bfd_arm_vfp11_write_mask (destmask, fd);
5623 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5624 regs[1] = fm;
5625 *numregs = 2;
5626 break;
5627
5628 case 15: /* extended opcode. */
5629 {
5630 unsigned int extn = ((insn >> 15) & 0x1e)
5631 | ((insn >> 7) & 1);
5632
5633 switch (extn)
5634 {
5635 case 0: /* fcpy[sd]. */
5636 case 1: /* fabs[sd]. */
5637 case 2: /* fneg[sd]. */
5638 case 8: /* fcmp[sd]. */
5639 case 9: /* fcmpe[sd]. */
5640 case 10: /* fcmpz[sd]. */
5641 case 11: /* fcmpez[sd]. */
5642 case 16: /* fuito[sd]. */
5643 case 17: /* fsito[sd]. */
5644 case 24: /* ftoui[sd]. */
5645 case 25: /* ftouiz[sd]. */
5646 case 26: /* ftosi[sd]. */
5647 case 27: /* ftosiz[sd]. */
5648 /* These instructions will not bounce due to underflow. */
5649 *numregs = 0;
5650 pipe = VFP11_FMAC;
5651 break;
5652
5653 case 3: /* fsqrt[sd]. */
5654 /* fsqrt cannot underflow, but its register write can (perhaps)
5655 trigger the erratum for previous instructions. */
5656 bfd_arm_vfp11_write_mask (destmask, fd);
5657 pipe = VFP11_DS;
5658 break;
5659
5660 case 15: /* fcvt{ds,sd}. */
5661 {
5662 int rnum = 0;
5663
5664 bfd_arm_vfp11_write_mask (destmask, fd);
5665
5666 /* Only FCVTSD can underflow. */
5667 if ((insn & 0x100) != 0)
5668 regs[rnum++] = fm;
5669
5670 *numregs = rnum;
5671
5672 pipe = VFP11_FMAC;
5673 }
5674 break;
5675
5676 default:
5677 return VFP11_BAD;
5678 }
5679 }
5680 break;
5681
5682 default:
5683 return VFP11_BAD;
5684 }
5685 }
5686 /* Two-register transfer. */
5687 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5688 {
5689 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5690
5691 if ((insn & 0x100000) == 0)
5692 {
5693 if (is_double)
5694 bfd_arm_vfp11_write_mask (destmask, fm);
5695 else
5696 {
5697 bfd_arm_vfp11_write_mask (destmask, fm);
5698 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5699 }
5700 }
5701
5702 pipe = VFP11_LS;
5703 }
5704 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5705 {
5706 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5707 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5708
5709 switch (puw)
5710 {
5711 case 0: /* Two-reg transfer. We should catch these above. */
5712 abort ();
5713
5714 case 2: /* fldm[sdx]. */
5715 case 3:
5716 case 5:
5717 {
5718 unsigned int i, offset = insn & 0xff;
5719
5720 if (is_double)
5721 offset >>= 1;
5722
5723 for (i = fd; i < fd + offset; i++)
5724 bfd_arm_vfp11_write_mask (destmask, i);
5725 }
5726 break;
5727
5728 case 4: /* fld[sd]. */
5729 case 6:
5730 bfd_arm_vfp11_write_mask (destmask, fd);
5731 break;
5732
5733 default:
5734 return VFP11_BAD;
5735 }
5736
5737 pipe = VFP11_LS;
5738 }
5739 /* Single-register transfer. Note L==0. */
5740 else if ((insn & 0x0f100e10) == 0x0e000a10)
5741 {
5742 unsigned int opcode = (insn >> 21) & 7;
5743 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5744
5745 switch (opcode)
5746 {
5747 case 0: /* fmsr/fmdlr. */
5748 case 1: /* fmdhr. */
5749 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5750 destination register. I don't know if this is exactly right,
5751 but it is the conservative choice. */
5752 bfd_arm_vfp11_write_mask (destmask, fn);
5753 break;
5754
5755 case 7: /* fmxr. */
5756 break;
5757 }
5758
5759 pipe = VFP11_LS;
5760 }
5761
5762 return pipe;
5763 }
5764
5765
5766 static int elf32_arm_compare_mapping (const void * a, const void * b);
5767
5768
5769 /* Look for potentially-troublesome code sequences which might trigger the
5770 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5771 (available from ARM) for details of the erratum. A short version is
5772 described in ld.texinfo. */
5773
5774 bfd_boolean
5775 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5776 {
5777 asection *sec;
5778 bfd_byte *contents = NULL;
5779 int state = 0;
5780 int regs[3], numregs = 0;
5781 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5782 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5783
5784 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5785 The states transition as follows:
5786
5787 0 -> 1 (vector) or 0 -> 2 (scalar)
5788 A VFP FMAC-pipeline instruction has been seen. Fill
5789 regs[0]..regs[numregs-1] with its input operands. Remember this
5790 instruction in 'first_fmac'.
5791
5792 1 -> 2
5793 Any instruction, except for a VFP instruction which overwrites
5794 regs[*].
5795
5796 1 -> 3 [ -> 0 ] or
5797 2 -> 3 [ -> 0 ]
5798 A VFP instruction has been seen which overwrites any of regs[*].
5799 We must make a veneer! Reset state to 0 before examining next
5800 instruction.
5801
5802 2 -> 0
5803 If we fail to match anything in state 2, reset to state 0 and reset
5804 the instruction pointer to the instruction after 'first_fmac'.
5805
5806 If the VFP11 vector mode is in use, there must be at least two unrelated
5807 instructions between anti-dependent VFP11 instructions to properly avoid
5808 triggering the erratum, hence the use of the extra state 1. */
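
  /* As an illustrative scalar-mode example: "fmacs s0, s1, s2" records
     s0, s1 and s2 as inputs and moves to state 2; a following
     "flds s1, [r0]" overwrites s1, so state 3 is reached and a veneer is
     emitted for the FMAC. */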
5809
5810 /* If we are only performing a partial link do not bother
5811 to construct any glue. */
5812 if (link_info->relocatable)
5813 return TRUE;
5814
5815 /* Skip if this bfd does not correspond to an ELF image. */
5816 if (! is_arm_elf (abfd))
5817 return TRUE;
5818
5819 /* We should have chosen a fix type by the time we get here. */
5820 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5821
5822 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5823 return TRUE;
5824
5825 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5826 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5827 return TRUE;
5828
5829 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5830 {
5831 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5832 struct _arm_elf_section_data *sec_data;
5833
5834 /* If we don't have executable progbits, we're not interested in this
5835 section. Also skip if section is to be excluded. */
5836 if (elf_section_type (sec) != SHT_PROGBITS
5837 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5838 || (sec->flags & SEC_EXCLUDE) != 0
5839 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5840 || sec->output_section == bfd_abs_section_ptr
5841 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5842 continue;
5843
5844 sec_data = elf32_arm_section_data (sec);
5845
5846 if (sec_data->mapcount == 0)
5847 continue;
5848
5849 if (elf_section_data (sec)->this_hdr.contents != NULL)
5850 contents = elf_section_data (sec)->this_hdr.contents;
5851 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5852 goto error_return;
5853
5854 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5855 elf32_arm_compare_mapping);
5856
5857 for (span = 0; span < sec_data->mapcount; span++)
5858 {
5859 unsigned int span_start = sec_data->map[span].vma;
5860 unsigned int span_end = (span == sec_data->mapcount - 1)
5861 ? sec->size : sec_data->map[span + 1].vma;
5862 char span_type = sec_data->map[span].type;
5863
5864 /* FIXME: Only ARM mode is supported at present. We may need to
5865 support Thumb-2 mode also at some point. */
5866 if (span_type != 'a')
5867 continue;
5868
5869 for (i = span_start; i < span_end;)
5870 {
5871 unsigned int next_i = i + 4;
5872 unsigned int insn = bfd_big_endian (abfd)
5873 ? (contents[i] << 24)
5874 | (contents[i + 1] << 16)
5875 | (contents[i + 2] << 8)
5876 | contents[i + 3]
5877 : (contents[i + 3] << 24)
5878 | (contents[i + 2] << 16)
5879 | (contents[i + 1] << 8)
5880 | contents[i];
5881 unsigned int writemask = 0;
5882 enum bfd_arm_vfp11_pipe pipe;
5883
5884 switch (state)
5885 {
5886 case 0:
5887 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5888 &numregs);
5889 /* I'm assuming the VFP11 erratum can trigger with denorm
5890 operands on either the FMAC or the DS pipeline. This might
5891 lead to slightly overenthusiastic veneer insertion. */
5892 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5893 {
5894 state = use_vector ? 1 : 2;
5895 first_fmac = i;
5896 veneer_of_insn = insn;
5897 }
5898 break;
5899
5900 case 1:
5901 {
5902 int other_regs[3], other_numregs;
5903 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5904 other_regs,
5905 &other_numregs);
5906 if (pipe != VFP11_BAD
5907 && bfd_arm_vfp11_antidependency (writemask, regs,
5908 numregs))
5909 state = 3;
5910 else
5911 state = 2;
5912 }
5913 break;
5914
5915 case 2:
5916 {
5917 int other_regs[3], other_numregs;
5918 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5919 other_regs,
5920 &other_numregs);
5921 if (pipe != VFP11_BAD
5922 && bfd_arm_vfp11_antidependency (writemask, regs,
5923 numregs))
5924 state = 3;
5925 else
5926 {
5927 state = 0;
5928 next_i = first_fmac + 4;
5929 }
5930 }
5931 break;
5932
5933 case 3:
5934 abort (); /* Should be unreachable. */
5935 }
5936
5937 if (state == 3)
5938 {
5939 elf32_vfp11_erratum_list *newerr
5940 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5941 int errcount;
5942
5943 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5944
5945 newerr->u.b.vfp_insn = veneer_of_insn;
5946
5947 switch (span_type)
5948 {
5949 case 'a':
5950 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5951 break;
5952
5953 default:
5954 abort ();
5955 }
5956
5957 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5958 first_fmac);
5959
5960 newerr->vma = -1;
5961
5962 newerr->next = sec_data->erratumlist;
5963 sec_data->erratumlist = newerr;
5964
5965 state = 0;
5966 }
5967
5968 i = next_i;
5969 }
5970 }
5971
5972 if (contents != NULL
5973 && elf_section_data (sec)->this_hdr.contents != contents)
5974 free (contents);
5975 contents = NULL;
5976 }
5977
5978 return TRUE;
5979
5980 error_return:
5981 if (contents != NULL
5982 && elf_section_data (sec)->this_hdr.contents != contents)
5983 free (contents);
5984
5985 return FALSE;
5986 }
5987
5988 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
5989 after sections have been laid out, using specially-named symbols. */
5990
5991 void
5992 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
5993 struct bfd_link_info *link_info)
5994 {
5995 asection *sec;
5996 struct elf32_arm_link_hash_table *globals;
5997 char *tmp_name;
5998
5999 if (link_info->relocatable)
6000 return;
6001
6002 /* Skip if this bfd does not correspond to an ELF image. */
6003 if (! is_arm_elf (abfd))
6004 return;
6005
6006 globals = elf32_arm_hash_table (link_info);
6007
6008 tmp_name = bfd_malloc ((bfd_size_type) strlen
6009 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6010
6011 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6012 {
6013 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6014 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6015
6016 for (; errnode != NULL; errnode = errnode->next)
6017 {
6018 struct elf_link_hash_entry *myh;
6019 bfd_vma vma;
6020
6021 switch (errnode->type)
6022 {
6023 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6024 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6025 /* Find veneer symbol. */
6026 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6027 errnode->u.b.veneer->u.v.id);
6028
6029 myh = elf_link_hash_lookup
6030 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6031
6032 if (myh == NULL)
6033 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6034 "`%s'"), abfd, tmp_name);
6035
6036 vma = myh->root.u.def.section->output_section->vma
6037 + myh->root.u.def.section->output_offset
6038 + myh->root.u.def.value;
6039
6040 errnode->u.b.veneer->vma = vma;
6041 break;
6042
6043 case VFP11_ERRATUM_ARM_VENEER:
6044 case VFP11_ERRATUM_THUMB_VENEER:
6045 /* Find return location. */
6046 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6047 errnode->u.v.id);
6048
6049 myh = elf_link_hash_lookup
6050 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6051
6052 if (myh == NULL)
6053 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6054 "`%s'"), abfd, tmp_name);
6055
6056 vma = myh->root.u.def.section->output_section->vma
6057 + myh->root.u.def.section->output_offset
6058 + myh->root.u.def.value;
6059
6060 errnode->u.v.branch->vma = vma;
6061 break;
6062
6063 default:
6064 abort ();
6065 }
6066 }
6067 }
6068
6069 free (tmp_name);
6070 }
6071
6072
6073 /* Set target relocation values needed during linking. */
6074
6075 void
6076 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6077 struct bfd_link_info *link_info,
6078 int target1_is_rel,
6079 char * target2_type,
6080 int fix_v4bx,
6081 int use_blx,
6082 bfd_arm_vfp11_fix vfp11_fix,
6083 int no_enum_warn, int no_wchar_warn,
6084 int pic_veneer, int fix_cortex_a8)
6085 {
6086 struct elf32_arm_link_hash_table *globals;
6087
6088 globals = elf32_arm_hash_table (link_info);
6089
6090 globals->target1_is_rel = target1_is_rel;
6091 if (strcmp (target2_type, "rel") == 0)
6092 globals->target2_reloc = R_ARM_REL32;
6093 else if (strcmp (target2_type, "abs") == 0)
6094 globals->target2_reloc = R_ARM_ABS32;
6095 else if (strcmp (target2_type, "got-rel") == 0)
6096 globals->target2_reloc = R_ARM_GOT_PREL;
6097 else
6098 {
6099 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6100 target2_type);
6101 }
6102 globals->fix_v4bx = fix_v4bx;
6103 globals->use_blx |= use_blx;
6104 globals->vfp11_fix = vfp11_fix;
6105 globals->pic_veneer = pic_veneer;
6106 globals->fix_cortex_a8 = fix_cortex_a8;
6107
6108 BFD_ASSERT (is_arm_elf (output_bfd));
6109 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6110 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6111 }
6112
6113 /* Replace the target offset of a Thumb bl or b.w instruction. */
6114
6115 static void
6116 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6117 {
6118 bfd_vma upper;
6119 bfd_vma lower;
6120 int reloc_sign;
6121
6122 BFD_ASSERT ((offset & 1) == 0);
6123
6124 upper = bfd_get_16 (abfd, insn);
6125 lower = bfd_get_16 (abfd, insn + 2);
6126 reloc_sign = (offset < 0) ? 1 : 0;
6127 upper = (upper & ~(bfd_vma) 0x7ff)
6128 | ((offset >> 12) & 0x3ff)
6129 | (reloc_sign << 10);
6130 lower = (lower & ~(bfd_vma) 0x2fff)
6131 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6132 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6133 | ((offset >> 1) & 0x7ff);
6134 bfd_put_16 (abfd, upper, insn);
6135 bfd_put_16 (abfd, lower, insn + 2);
6136 }
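
/* For example, rewriting a plain BL (upper 0xf000, lower 0xf800) with
   OFFSET == 0x1000 gives upper == 0xf001 (S == 0, imm10 == 1) and
   lower == 0xf800 (J1 == J2 == 1, imm11 == 0); decoding S:I1:I2:imm10:imm11:0
   with I1 == NOT(J1 XOR S) and I2 == NOT(J2 XOR S) recovers 0x1000 again.
   The numbers are illustrative only. */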
6137
6138 /* Thumb code calling an ARM function. */
6139
6140 static int
6141 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6142 const char * name,
6143 bfd * input_bfd,
6144 bfd * output_bfd,
6145 asection * input_section,
6146 bfd_byte * hit_data,
6147 asection * sym_sec,
6148 bfd_vma offset,
6149 bfd_signed_vma addend,
6150 bfd_vma val,
6151 char **error_message)
6152 {
6153 asection * s = 0;
6154 bfd_vma my_offset;
6155 long int ret_offset;
6156 struct elf_link_hash_entry * myh;
6157 struct elf32_arm_link_hash_table * globals;
6158
6159 myh = find_thumb_glue (info, name, error_message);
6160 if (myh == NULL)
6161 return FALSE;
6162
6163 globals = elf32_arm_hash_table (info);
6164
6165 BFD_ASSERT (globals != NULL);
6166 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6167
6168 my_offset = myh->root.u.def.value;
6169
6170 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6171 THUMB2ARM_GLUE_SECTION_NAME);
6172
6173 BFD_ASSERT (s != NULL);
6174 BFD_ASSERT (s->contents != NULL);
6175 BFD_ASSERT (s->output_section != NULL);
6176
6177 if ((my_offset & 0x01) == 0x01)
6178 {
6179 if (sym_sec != NULL
6180 && sym_sec->owner != NULL
6181 && !INTERWORK_FLAG (sym_sec->owner))
6182 {
6183 (*_bfd_error_handler)
6184 (_("%B(%s): warning: interworking not enabled.\n"
6185 " first occurrence: %B: thumb call to arm"),
6186 sym_sec->owner, input_bfd, name);
6187
6188 return FALSE;
6189 }
6190
6191 --my_offset;
6192 myh->root.u.def.value = my_offset;
6193
6194 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6195 s->contents + my_offset);
6196
6197 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6198 s->contents + my_offset + 2);
6199
6200 ret_offset =
6201 /* Address of destination of the stub. */
6202 ((bfd_signed_vma) val)
6203 - ((bfd_signed_vma)
6204 /* Offset from the start of the current section
6205 to the start of the stubs. */
6206 (s->output_offset
6207 /* Offset of the start of this stub from the start of the stubs. */
6208 + my_offset
6209 /* Address of the start of the current section. */
6210 + s->output_section->vma)
6211 /* The branch instruction is 4 bytes into the stub. */
6212 + 4
6213 /* ARM branches work from the pc of the instruction + 8. */
6214 + 8);
6215
6216 put_arm_insn (globals, output_bfd,
6217 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6218 s->contents + my_offset + 4);
6219 }
6220
6221 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6222
6223 /* Now go back and fix up the original BL insn to point to here. */
6224 ret_offset =
6225 /* Address of where the stub is located. */
6226 (s->output_section->vma + s->output_offset + my_offset)
6227 /* Address of where the BL is located. */
6228 - (input_section->output_section->vma + input_section->output_offset
6229 + offset)
6230 /* Addend in the relocation. */
6231 - addend
6232 /* Biasing for PC-relative addressing. */
6233 - 8;
6234
6235 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6236
6237 return TRUE;
6238 }
6239
6240 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6241
6242 static struct elf_link_hash_entry *
6243 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6244 const char * name,
6245 bfd * input_bfd,
6246 bfd * output_bfd,
6247 asection * sym_sec,
6248 bfd_vma val,
6249 asection * s,
6250 char ** error_message)
6251 {
6252 bfd_vma my_offset;
6253 long int ret_offset;
6254 struct elf_link_hash_entry * myh;
6255 struct elf32_arm_link_hash_table * globals;
6256
6257 myh = find_arm_glue (info, name, error_message);
6258 if (myh == NULL)
6259 return NULL;
6260
6261 globals = elf32_arm_hash_table (info);
6262
6263 BFD_ASSERT (globals != NULL);
6264 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6265
6266 my_offset = myh->root.u.def.value;
6267
6268 if ((my_offset & 0x01) == 0x01)
6269 {
6270 if (sym_sec != NULL
6271 && sym_sec->owner != NULL
6272 && !INTERWORK_FLAG (sym_sec->owner))
6273 {
6274 (*_bfd_error_handler)
6275 (_("%B(%s): warning: interworking not enabled.\n"
6276 " first occurrence: %B: arm call to thumb"),
6277 sym_sec->owner, input_bfd, name);
6278 }
6279
6280 --my_offset;
6281 myh->root.u.def.value = my_offset;
6282
6283 if (info->shared || globals->root.is_relocatable_executable
6284 || globals->pic_veneer)
6285 {
6286 /* For relocatable objects we can't use absolute addresses,
6287 so construct the address from a relative offset. */
6288 /* TODO: If the offset is small it's probably worth
6289 constructing the address with adds. */
6290 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6291 s->contents + my_offset);
6292 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6293 s->contents + my_offset + 4);
6294 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6295 s->contents + my_offset + 8);
6296 /* Adjust the offset by 4 for the position of the add,
6297 and 8 for the pipeline offset. */
6298 ret_offset = (val - (s->output_offset
6299 + s->output_section->vma
6300 + my_offset + 12))
6301 | 1;
6302 bfd_put_32 (output_bfd, ret_offset,
6303 s->contents + my_offset + 12);
6304 }
6305 else if (globals->use_blx)
6306 {
6307 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6308 s->contents + my_offset);
6309
6310 /* It's a thumb address. Add the low order bit. */
6311 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6312 s->contents + my_offset + 4);
6313 }
6314 else
6315 {
6316 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6317 s->contents + my_offset);
6318
6319 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6320 s->contents + my_offset + 4);
6321
6322 /* It's a thumb address. Add the low order bit. */
6323 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6324 s->contents + my_offset + 8);
6325
6326 my_offset += 12;
6327 }
6328 }
6329
6330 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6331
6332 return myh;
6333 }
6334
6335 /* Arm code calling a Thumb function. */
6336
6337 static int
6338 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6339 const char * name,
6340 bfd * input_bfd,
6341 bfd * output_bfd,
6342 asection * input_section,
6343 bfd_byte * hit_data,
6344 asection * sym_sec,
6345 bfd_vma offset,
6346 bfd_signed_vma addend,
6347 bfd_vma val,
6348 char **error_message)
6349 {
6350 unsigned long int tmp;
6351 bfd_vma my_offset;
6352 asection * s;
6353 long int ret_offset;
6354 struct elf_link_hash_entry * myh;
6355 struct elf32_arm_link_hash_table * globals;
6356
6357 globals = elf32_arm_hash_table (info);
6358
6359 BFD_ASSERT (globals != NULL);
6360 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6361
6362 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6363 ARM2THUMB_GLUE_SECTION_NAME);
6364 BFD_ASSERT (s != NULL);
6365 BFD_ASSERT (s->contents != NULL);
6366 BFD_ASSERT (s->output_section != NULL);
6367
6368 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6369 sym_sec, val, s, error_message);
6370 if (!myh)
6371 return FALSE;
6372
6373 my_offset = myh->root.u.def.value;
6374 tmp = bfd_get_32 (input_bfd, hit_data);
6375 tmp = tmp & 0xFF000000;
6376
6377 /* Somehow these are both 4 too far, so subtract 8. */
6378 ret_offset = (s->output_offset
6379 + my_offset
6380 + s->output_section->vma
6381 - (input_section->output_offset
6382 + input_section->output_section->vma
6383 + offset + addend)
6384 - 8);
6385
6386 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6387
6388 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6389
6390 return TRUE;
6391 }
6392
6393 /* Populate Arm stub for an exported Thumb function. */
6394
6395 static bfd_boolean
6396 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6397 {
6398 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6399 asection * s;
6400 struct elf_link_hash_entry * myh;
6401 struct elf32_arm_link_hash_entry *eh;
6402 struct elf32_arm_link_hash_table * globals;
6403 asection *sec;
6404 bfd_vma val;
6405 char *error_message;
6406
6407 eh = elf32_arm_hash_entry (h);
6408 /* Allocate stubs for exported Thumb functions on v4t. */
6409 if (eh->export_glue == NULL)
6410 return TRUE;
6411
6412 globals = elf32_arm_hash_table (info);
6413
6414 BFD_ASSERT (globals != NULL);
6415 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6416
6417 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6418 ARM2THUMB_GLUE_SECTION_NAME);
6419 BFD_ASSERT (s != NULL);
6420 BFD_ASSERT (s->contents != NULL);
6421 BFD_ASSERT (s->output_section != NULL);
6422
6423 sec = eh->export_glue->root.u.def.section;
6424
6425 BFD_ASSERT (sec->output_section != NULL);
6426
6427 val = eh->export_glue->root.u.def.value + sec->output_offset
6428 + sec->output_section->vma;
6429
6430 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6431 h->root.u.def.section->owner,
6432 globals->obfd, sec, val, s,
6433 &error_message);
6434 BFD_ASSERT (myh);
6435 return TRUE;
6436 }
6437
6438 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6439
6440 static bfd_vma
6441 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6442 {
6443 bfd_byte *p;
6444 bfd_vma glue_addr;
6445 asection *s;
6446 struct elf32_arm_link_hash_table *globals;
6447
6448 globals = elf32_arm_hash_table (info);
6449
6450 BFD_ASSERT (globals != NULL);
6451 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6452
6453 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6454 ARM_BX_GLUE_SECTION_NAME);
6455 BFD_ASSERT (s != NULL);
6456 BFD_ASSERT (s->contents != NULL);
6457 BFD_ASSERT (s->output_section != NULL);
6458
6459 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6460
6461 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6462
6463 if ((globals->bx_glue_offset[reg] & 1) == 0)
6464 {
6465 p = s->contents + glue_addr;
6466 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6467 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6468 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6469 globals->bx_glue_offset[reg] |= 1;
6470 }
6471
6472 return glue_addr + s->output_section->vma + s->output_offset;
6473 }
6474
6475 /* Generate Arm stubs for exported Thumb symbols. */
6476 static void
6477 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6478 struct bfd_link_info *link_info)
6479 {
6480 struct elf32_arm_link_hash_table * globals;
6481
6482 if (link_info == NULL)
6483 /* Ignore this if we are not called by the ELF backend linker. */
6484 return;
6485
6486 globals = elf32_arm_hash_table (link_info);
6487 /* If blx is available then exported Thumb symbols are OK and there is
6488 nothing to do. */
6489 if (globals->use_blx)
6490 return;
6491
6492 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6493 link_info);
6494 }
6495
6496 /* Some relocations map to different relocations depending on the
6497 target. Return the real relocation. */
6498
6499 static int
6500 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6501 int r_type)
6502 {
6503 switch (r_type)
6504 {
6505 case R_ARM_TARGET1:
6506 if (globals->target1_is_rel)
6507 return R_ARM_REL32;
6508 else
6509 return R_ARM_ABS32;
6510
6511 case R_ARM_TARGET2:
6512 return globals->target2_reloc;
6513
6514 default:
6515 return r_type;
6516 }
6517 }
6518
6519 /* Return the base VMA address which should be subtracted from real addresses
6520 when resolving @dtpoff relocations.
6521 This is the PT_TLS segment's p_vaddr. */
6522
6523 static bfd_vma
6524 dtpoff_base (struct bfd_link_info *info)
6525 {
6526 /* If tls_sec is NULL, we should have signalled an error already. */
6527 if (elf_hash_table (info)->tls_sec == NULL)
6528 return 0;
6529 return elf_hash_table (info)->tls_sec->vma;
6530 }
6531
6532 /* Return the relocation value for a @tpoff relocation,
6533 given that the STT_TLS symbol's virtual address is ADDRESS. */
6534
6535 static bfd_vma
6536 tpoff (struct bfd_link_info *info, bfd_vma address)
6537 {
6538 struct elf_link_hash_table *htab = elf_hash_table (info);
6539 bfd_vma base;
6540
6541 /* If tls_sec is NULL, we should have signalled an error already. */
6542 if (htab->tls_sec == NULL)
6543 return 0;
6544 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6545 return address - htab->tls_sec->vma + base;
6546 }
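
/* A worked example, assuming the usual ARM TCB_SIZE of 8: if the PT_TLS
   segment starts at 0x11000 with 4-byte alignment, base == align_power (8, 2)
   == 8, so a thread-local object at 0x11010 gets a @tpoff value of
   0x11010 - 0x11000 + 8 == 0x18. */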
6547
6548 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6549 VALUE is the relocation value. */
6550
6551 static bfd_reloc_status_type
6552 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6553 {
6554 if (value > 0xfff)
6555 return bfd_reloc_overflow;
6556
6557 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6558 bfd_put_32 (abfd, value, data);
6559 return bfd_reloc_ok;
6560 }
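
/* For example, applying a VALUE of 0x234 to "ldr r3, [r5]" (0xe5953000)
   produces 0xe5953234, while any VALUE above 0xfff is rejected as an
   overflow. */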
6561
6562 /* For a given value of n, calculate the value of G_n as required to
6563 deal with group relocations. We return it in the form of an
6564 encoded constant-and-rotation, together with the final residual. If n is
6565 specified as less than zero, then final_residual is filled with the
6566 input value and no further action is performed. */
6567
6568 static bfd_vma
6569 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6570 {
6571 int current_n;
6572 bfd_vma g_n;
6573 bfd_vma encoded_g_n = 0;
6574 bfd_vma residual = value; /* Also known as Y_n. */
6575
6576 for (current_n = 0; current_n <= n; current_n++)
6577 {
6578 int shift;
6579
6580 /* Calculate which part of the value to mask. */
6581 if (residual == 0)
6582 shift = 0;
6583 else
6584 {
6585 int msb;
6586
6587 /* Determine the most significant bit in the residual and
6588 align the resulting value to a 2-bit boundary. */
6589 for (msb = 30; msb >= 0; msb -= 2)
6590 if (residual & (3 << msb))
6591 break;
6592
6593 /* The desired shift is now (msb - 6), or zero, whichever
6594 is the greater. */
6595 shift = msb - 6;
6596 if (shift < 0)
6597 shift = 0;
6598 }
6599
6600 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6601 g_n = residual & (0xff << shift);
6602 encoded_g_n = (g_n >> shift)
6603 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6604
6605 /* Calculate the residual for the next time around. */
6606 residual &= ~g_n;
6607 }
6608
6609 *final_residual = residual;
6610
6611 return encoded_g_n;
6612 }
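
/* Worked example (values chosen for illustration): for VALUE == 0x12345678
   and N == 0 the most significant set bit pair is at bit 28, so the shift is
   22; hence G_0 == 0x12000000, the encoded form is 0x548 (immediate 0x48,
   rotation field 5, i.e. rotate right by 10) and the final residual is
   0x345678. */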
6613
6614 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6615 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6616
6617 static int
6618 identify_add_or_sub (bfd_vma insn)
6619 {
6620 int opcode = insn & 0x1e00000;
6621
6622 if (opcode == 1 << 23) /* ADD */
6623 return 1;
6624
6625 if (opcode == 1 << 22) /* SUB */
6626 return -1;
6627
6628 return 0;
6629 }
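
/* For example, "add r0, pc, #4" (0xe28f0004) has opcode bits 1 << 23 and
   returns 1; "sub r0, pc, #4" (0xe24f0004) has 1 << 22 and returns -1;
   anything else, e.g. an ORR, returns 0. */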
6630
6631 /* Perform a relocation as part of a final link. */
6632
6633 static bfd_reloc_status_type
6634 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6635 bfd * input_bfd,
6636 bfd * output_bfd,
6637 asection * input_section,
6638 bfd_byte * contents,
6639 Elf_Internal_Rela * rel,
6640 bfd_vma value,
6641 struct bfd_link_info * info,
6642 asection * sym_sec,
6643 const char * sym_name,
6644 int sym_flags,
6645 struct elf_link_hash_entry * h,
6646 bfd_boolean * unresolved_reloc_p,
6647 char ** error_message)
6648 {
6649 unsigned long r_type = howto->type;
6650 unsigned long r_symndx;
6651 bfd_byte * hit_data = contents + rel->r_offset;
6652 bfd * dynobj = NULL;
6653 Elf_Internal_Shdr * symtab_hdr;
6654 struct elf_link_hash_entry ** sym_hashes;
6655 bfd_vma * local_got_offsets;
6656 asection * sgot = NULL;
6657 asection * splt = NULL;
6658 asection * sreloc = NULL;
6659 bfd_vma addend;
6660 bfd_signed_vma signed_addend;
6661 struct elf32_arm_link_hash_table * globals;
6662
6663 globals = elf32_arm_hash_table (info);
6664
6665 BFD_ASSERT (is_arm_elf (input_bfd));
6666
6667 /* Some relocation types map to different relocations depending on the
6668 target. We pick the right one here. */
6669 r_type = arm_real_reloc_type (globals, r_type);
6670 if (r_type != howto->type)
6671 howto = elf32_arm_howto_from_type (r_type);
6672
6673 /* If the start address has been set, then set the EF_ARM_HASENTRY
6674 flag. Setting this more than once is redundant, but the cost is
6675 not too high, and it keeps the code simple.
6676
6677 The test is done here, rather than somewhere else, because the
6678 start address is only set just before the final link commences.
6679
6680 Note - if the user deliberately sets a start address of 0, the
6681 flag will not be set. */
6682 if (bfd_get_start_address (output_bfd) != 0)
6683 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6684
6685 dynobj = elf_hash_table (info)->dynobj;
6686 if (dynobj)
6687 {
6688 sgot = bfd_get_section_by_name (dynobj, ".got");
6689 splt = bfd_get_section_by_name (dynobj, ".plt");
6690 }
6691 symtab_hdr = & elf_symtab_hdr (input_bfd);
6692 sym_hashes = elf_sym_hashes (input_bfd);
6693 local_got_offsets = elf_local_got_offsets (input_bfd);
6694 r_symndx = ELF32_R_SYM (rel->r_info);
6695
6696 if (globals->use_rel)
6697 {
6698 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6699
6700 if (addend & ((howto->src_mask + 1) >> 1))
6701 {
6702 signed_addend = -1;
6703 signed_addend &= ~ howto->src_mask;
6704 signed_addend |= addend;
6705 }
6706 else
6707 signed_addend = addend;
6708 }
6709 else
6710 addend = signed_addend = rel->r_addend;
6711
6712 switch (r_type)
6713 {
6714 case R_ARM_NONE:
6715 /* We don't need to find a value for this symbol. It's just a
6716 marker. */
6717 *unresolved_reloc_p = FALSE;
6718 return bfd_reloc_ok;
6719
6720 case R_ARM_ABS12:
6721 if (!globals->vxworks_p)
6722 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6723
6724 case R_ARM_PC24:
6725 case R_ARM_ABS32:
6726 case R_ARM_ABS32_NOI:
6727 case R_ARM_REL32:
6728 case R_ARM_REL32_NOI:
6729 case R_ARM_CALL:
6730 case R_ARM_JUMP24:
6731 case R_ARM_XPC25:
6732 case R_ARM_PREL31:
6733 case R_ARM_PLT32:
6734 /* Handle relocations which should use the PLT entry. ABS32/REL32
6735 will use the symbol's value, which may point to a PLT entry, but we
6736 don't need to handle that here. If we created a PLT entry, all
6737 branches in this object should go to it, except if the PLT is too
6738 far away, in which case a long branch stub should be inserted. */
6739 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6740 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6741 && r_type != R_ARM_CALL
6742 && r_type != R_ARM_JUMP24
6743 && r_type != R_ARM_PLT32)
6744 && h != NULL
6745 && splt != NULL
6746 && h->plt.offset != (bfd_vma) -1)
6747 {
6748 /* If we've created a .plt section, and assigned a PLT entry to
6749 this function, it should not be known to bind locally. If
6750 it were, we would have cleared the PLT entry. */
6751 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6752
6753 value = (splt->output_section->vma
6754 + splt->output_offset
6755 + h->plt.offset);
6756 *unresolved_reloc_p = FALSE;
6757 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6758 contents, rel->r_offset, value,
6759 rel->r_addend);
6760 }
6761
6762 /* When generating a shared object or relocatable executable, these
6763 relocations are copied into the output file to be resolved at
6764 run time. */
6765 if ((info->shared || globals->root.is_relocatable_executable)
6766 && (input_section->flags & SEC_ALLOC)
6767 && !(elf32_arm_hash_table (info)->vxworks_p
6768 && strcmp (input_section->output_section->name,
6769 ".tls_vars") == 0)
6770 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6771 || !SYMBOL_CALLS_LOCAL (info, h))
6772 && (h == NULL
6773 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6774 || h->root.type != bfd_link_hash_undefweak)
6775 && r_type != R_ARM_PC24
6776 && r_type != R_ARM_CALL
6777 && r_type != R_ARM_JUMP24
6778 && r_type != R_ARM_PREL31
6779 && r_type != R_ARM_PLT32)
6780 {
6781 Elf_Internal_Rela outrel;
6782 bfd_byte *loc;
6783 bfd_boolean skip, relocate;
6784
6785 *unresolved_reloc_p = FALSE;
6786
6787 if (sreloc == NULL)
6788 {
6789 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6790 ! globals->use_rel);
6791
6792 if (sreloc == NULL)
6793 return bfd_reloc_notsupported;
6794 }
6795
6796 skip = FALSE;
6797 relocate = FALSE;
6798
6799 outrel.r_addend = addend;
6800 outrel.r_offset =
6801 _bfd_elf_section_offset (output_bfd, info, input_section,
6802 rel->r_offset);
6803 if (outrel.r_offset == (bfd_vma) -1)
6804 skip = TRUE;
6805 else if (outrel.r_offset == (bfd_vma) -2)
6806 skip = TRUE, relocate = TRUE;
6807 outrel.r_offset += (input_section->output_section->vma
6808 + input_section->output_offset);
6809
6810 if (skip)
6811 memset (&outrel, 0, sizeof outrel);
6812 else if (h != NULL
6813 && h->dynindx != -1
6814 && (!info->shared
6815 || !info->symbolic
6816 || !h->def_regular))
6817 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6818 else
6819 {
6820 int symbol;
6821
6822 /* This symbol is local, or marked to become local. */
6823 if (sym_flags == STT_ARM_TFUNC)
6824 value |= 1;
6825 if (globals->symbian_p)
6826 {
6827 asection *osec;
6828
6829 /* On Symbian OS, the data segment and text segment
6830 can be relocated independently. Therefore, we
6831 must indicate the segment to which this
6832 relocation is relative. The BPABI allows us to
6833 use any symbol in the right segment; we just use
6834 the section symbol as it is convenient. (We
6835 cannot use the symbol given by "h" directly as it
6836 will not appear in the dynamic symbol table.)
6837
6838 Note that the dynamic linker ignores the section
6839 symbol value, so we don't subtract osec->vma
6840 from the emitted reloc addend. */
6841 if (sym_sec)
6842 osec = sym_sec->output_section;
6843 else
6844 osec = input_section->output_section;
6845 symbol = elf_section_data (osec)->dynindx;
6846 if (symbol == 0)
6847 {
6848 struct elf_link_hash_table *htab = elf_hash_table (info);
6849
6850 if ((osec->flags & SEC_READONLY) == 0
6851 && htab->data_index_section != NULL)
6852 osec = htab->data_index_section;
6853 else
6854 osec = htab->text_index_section;
6855 symbol = elf_section_data (osec)->dynindx;
6856 }
6857 BFD_ASSERT (symbol != 0);
6858 }
6859 else
6860 /* On SVR4-ish systems, the dynamic loader cannot
6861 relocate the text and data segments independently,
6862 so the symbol does not matter. */
6863 symbol = 0;
6864 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6865 if (globals->use_rel)
6866 relocate = TRUE;
6867 else
6868 outrel.r_addend += value;
6869 }
6870
6871 loc = sreloc->contents;
6872 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6873 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6874
6875 /* If this reloc is against an external symbol, we do not want to
6876 fiddle with the addend. Otherwise, we need to include the symbol
6877 value so that it becomes an addend for the dynamic reloc. */
6878 if (! relocate)
6879 return bfd_reloc_ok;
6880
6881 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6882 contents, rel->r_offset, value,
6883 (bfd_vma) 0);
6884 }
6885 else switch (r_type)
6886 {
6887 case R_ARM_ABS12:
6888 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6889
6890 case R_ARM_XPC25: /* Arm BLX instruction. */
6891 case R_ARM_CALL:
6892 case R_ARM_JUMP24:
6893 case R_ARM_PC24: /* Arm B/BL instruction. */
6894 case R_ARM_PLT32:
6895 {
6896 bfd_vma from;
6897 bfd_signed_vma branch_offset;
6898 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6899
6900 if (r_type == R_ARM_XPC25)
6901 {
6902 /* Check for Arm calling Arm function. */
6903 /* FIXME: Should we translate the instruction into a BL
6904 instruction instead? */
6905 if (sym_flags != STT_ARM_TFUNC)
6906 (*_bfd_error_handler)
6907 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6908 input_bfd,
6909 h ? h->root.root.string : "(local)");
6910 }
6911 else if (r_type == R_ARM_PC24)
6912 {
6913 /* Check for Arm calling Thumb function. */
6914 if (sym_flags == STT_ARM_TFUNC)
6915 {
6916 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6917 output_bfd, input_section,
6918 hit_data, sym_sec, rel->r_offset,
6919 signed_addend, value,
6920 error_message))
6921 return bfd_reloc_ok;
6922 else
6923 return bfd_reloc_dangerous;
6924 }
6925 }
6926
6927 /* Check if a stub has to be inserted because the
6928 destination is too far or we are changing mode. */
6929 if ( r_type == R_ARM_CALL
6930 || r_type == R_ARM_JUMP24
6931 || r_type == R_ARM_PLT32)
6932 {
6933 /* If the call goes through a PLT entry, make sure to
6934 check distance to the right destination address. */
6935 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6936 {
6937 value = (splt->output_section->vma
6938 + splt->output_offset
6939 + h->plt.offset);
6940 *unresolved_reloc_p = FALSE;
6941 }
6942
6943 from = (input_section->output_section->vma
6944 + input_section->output_offset
6945 + rel->r_offset);
6946 branch_offset = (bfd_signed_vma)(value - from);
6947
6948 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6949 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6950 || ((sym_flags == STT_ARM_TFUNC)
6951 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6952 || (r_type == R_ARM_JUMP24)
6953 || (r_type == R_ARM_PLT32) ))
6954 )
6955 {
6956 /* The target is out of reach, so redirect the
6957 branch to the local stub for this function. */
6958
6959 stub_entry = elf32_arm_get_stub_entry (input_section,
6960 sym_sec, h,
6961 rel, globals);
6962 if (stub_entry != NULL)
6963 value = (stub_entry->stub_offset
6964 + stub_entry->stub_sec->output_offset
6965 + stub_entry->stub_sec->output_section->vma);
6966 }
6967 }
6968
6969 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6970 where:
6971 S is the address of the symbol in the relocation.
6972 P is the address of the instruction being relocated.
6973 A is the addend (extracted from the instruction) in bytes.
6974
6975 S is held in 'value'.
6976 P is the base address of the section containing the
6977 instruction plus the offset of the reloc into that
6978 section, ie:
6979 (input_section->output_section->vma +
6980 input_section->output_offset +
6981 rel->r_offset).
6982 A is the addend, converted into bytes, ie:
6983 (signed_addend * 4)
6984
6985 Note: None of these operations have knowledge of the pipeline
6986 size of the processor, thus it is up to the assembler to
6987 encode this information into the addend. */
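	    /* Illustrative numbers: with S == 0x8400, P == 0x8010 and a stored
	       REL addend of -2 words (-8 bytes), the code below computes
	       0x8400 - 0x8010 - 8 == 0x3e8, which after the right shift is the
	       24-bit field value 0xfa; at run time the branch reaches
	       P + 8 + (0xfa << 2) == 0x8400 again. */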
6988 value -= (input_section->output_section->vma
6989 + input_section->output_offset);
6990 value -= rel->r_offset;
6991 if (globals->use_rel)
6992 value += (signed_addend << howto->size);
6993 else
6994 /* RELA addends do not have to be adjusted by howto->size. */
6995 value += signed_addend;
6996
6997 signed_addend = value;
6998 signed_addend >>= howto->rightshift;
6999
7000 /* A branch to an undefined weak symbol is turned into a jump to
7001 the next instruction unless a PLT entry will be created. */
7002 if (h && h->root.type == bfd_link_hash_undefweak
7003 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7004 {
7005 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7006 | 0x0affffff;
7007 }
7008 else
7009 {
7010 /* Perform a signed range check. */
7011 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7012 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7013 return bfd_reloc_overflow;
7014
7015 addend = (value & 2);
7016
7017 value = (signed_addend & howto->dst_mask)
7018 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7019
7020 if (r_type == R_ARM_CALL)
7021 {
7022 /* Set the H bit in the BLX instruction. */
7023 if (sym_flags == STT_ARM_TFUNC)
7024 {
7025 if (addend)
7026 value |= (1 << 24);
7027 else
7028 value &= ~(bfd_vma)(1 << 24);
7029 }
7030
7031 /* Select the correct instruction (BL or BLX). */
7032 /* Only if we are not handling a BL to a stub; in that
7033 case, mode switching is performed by the stub. */
7034 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7035 value |= (1 << 28);
7036 else
7037 {
7038 value &= ~(bfd_vma)(1 << 28);
7039 value |= (1 << 24);
7040 }
7041 }
7042 }
7043 }
7044 break;
7045
7046 case R_ARM_ABS32:
7047 value += addend;
7048 if (sym_flags == STT_ARM_TFUNC)
7049 value |= 1;
7050 break;
7051
7052 case R_ARM_ABS32_NOI:
7053 value += addend;
7054 break;
7055
7056 case R_ARM_REL32:
7057 value += addend;
7058 if (sym_flags == STT_ARM_TFUNC)
7059 value |= 1;
7060 value -= (input_section->output_section->vma
7061 + input_section->output_offset + rel->r_offset);
7062 break;
7063
7064 case R_ARM_REL32_NOI:
7065 value += addend;
7066 value -= (input_section->output_section->vma
7067 + input_section->output_offset + rel->r_offset);
7068 break;
7069
7070 case R_ARM_PREL31:
7071 value -= (input_section->output_section->vma
7072 + input_section->output_offset + rel->r_offset);
7073 value += signed_addend;
7074 if (! h || h->root.type != bfd_link_hash_undefweak)
7075 {
7076 /* Check for overflow. */
7077 if ((value ^ (value >> 1)) & (1 << 30))
7078 return bfd_reloc_overflow;
7079 }
7080 value &= 0x7fffffff;
7081 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7082 if (sym_flags == STT_ARM_TFUNC)
7083 value |= 1;
7084 break;
7085 }
7086
7087 bfd_put_32 (input_bfd, value, hit_data);
7088 return bfd_reloc_ok;
7089
7090 case R_ARM_ABS8:
7091 value += addend;
7092 if ((long) value > 0x7f || (long) value < -0x80)
7093 return bfd_reloc_overflow;
7094
7095 bfd_put_8 (input_bfd, value, hit_data);
7096 return bfd_reloc_ok;
7097
7098 case R_ARM_ABS16:
7099 value += addend;
7100
7101 if ((long) value > 0x7fff || (long) value < -0x8000)
7102 return bfd_reloc_overflow;
7103
7104 bfd_put_16 (input_bfd, value, hit_data);
7105 return bfd_reloc_ok;
7106
7107 case R_ARM_THM_ABS5:
7108 /* Support ldr and str instructions for Thumb. */
7109 if (globals->use_rel)
7110 {
7111 /* Need to refetch addend. */
7112 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7113 /* ??? Need to determine shift amount from operand size. */
7114 addend >>= howto->rightshift;
7115 }
7116 value += addend;
7117
7118 /* ??? Isn't value unsigned? */
7119 if ((long) value > 0x1f || (long) value < -0x10)
7120 return bfd_reloc_overflow;
7121
7122 /* ??? Value needs to be properly shifted into place first. */
7123 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7124 bfd_put_16 (input_bfd, value, hit_data);
7125 return bfd_reloc_ok;
7126
7127 case R_ARM_THM_ALU_PREL_11_0:
7128 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7129 {
7130 bfd_vma insn;
7131 bfd_signed_vma relocation;
7132
7133 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7134 | bfd_get_16 (input_bfd, hit_data + 2);
7135
7136 if (globals->use_rel)
7137 {
7138 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7139 | ((insn & (1 << 26)) >> 15);
7140 if (insn & 0xf00000)
7141 signed_addend = -signed_addend;
7142 }
7143
7144 relocation = value + signed_addend;
7145 relocation -= (input_section->output_section->vma
7146 + input_section->output_offset
7147 + rel->r_offset);
7148
7149 value = abs (relocation);
7150
7151 if (value >= 0x1000)
7152 return bfd_reloc_overflow;
7153
7154 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7155 | ((value & 0x700) << 4)
7156 | ((value & 0x800) << 15);
7157 if (relocation < 0)
7158 insn |= 0xa00000;
7159
7160 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7161 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7162
7163 return bfd_reloc_ok;
7164 }
7165
7166 case R_ARM_THM_PC12:
7167 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7168 {
7169 bfd_vma insn;
7170 bfd_signed_vma relocation;
7171
7172 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7173 | bfd_get_16 (input_bfd, hit_data + 2);
7174
7175 if (globals->use_rel)
7176 {
7177 signed_addend = insn & 0xfff;
7178 if (!(insn & (1 << 23)))
7179 signed_addend = -signed_addend;
7180 }
7181
7182 relocation = value + signed_addend;
7183 relocation -= (input_section->output_section->vma
7184 + input_section->output_offset
7185 + rel->r_offset);
7186
7187 value = abs (relocation);
7188
7189 if (value >= 0x1000)
7190 return bfd_reloc_overflow;
7191
7192 insn = (insn & 0xff7ff000) | value;
7193 if (relocation >= 0)
7194 insn |= (1 << 23);
7195
7196 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7197 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7198
7199 return bfd_reloc_ok;
7200 }
7201
7202 case R_ARM_THM_XPC22:
7203 case R_ARM_THM_CALL:
7204 case R_ARM_THM_JUMP24:
7205 /* Thumb BL (branch long instruction). */
7206 {
7207 bfd_vma relocation;
7208 bfd_vma reloc_sign;
7209 bfd_boolean overflow = FALSE;
7210 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7211 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7212 bfd_signed_vma reloc_signed_max;
7213 bfd_signed_vma reloc_signed_min;
7214 bfd_vma check;
7215 bfd_signed_vma signed_check;
7216 int bitsize;
7217 int thumb2 = using_thumb2 (globals);
7218
7219 /* A branch to an undefined weak symbol is turned into a jump to
7220 the next instruction unless a PLT entry will be created. */
7221 if (h && h->root.type == bfd_link_hash_undefweak
7222 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7223 {
7224 bfd_put_16 (input_bfd, 0xe000, hit_data);
7225 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7226 return bfd_reloc_ok;
7227 }
7228
7229 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7230 with Thumb-1) involving the J1 and J2 bits. */
7231 if (globals->use_rel)
7232 {
7233 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7234 bfd_vma upper = upper_insn & 0x3ff;
7235 bfd_vma lower = lower_insn & 0x7ff;
7236 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7237 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7238 bfd_vma i1 = j1 ^ s ? 0 : 1;
7239 bfd_vma i2 = j2 ^ s ? 0 : 1;
7240
7241 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7242 /* Sign extend. */
7243 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7244
7245 signed_addend = addend;
7246 }
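
	    /* For example, upper_insn == 0xf001 and lower_insn == 0xf800 (the
	       BL produced in the insert_thumb_branch example above) give
	       S == 0, J1 == J2 == 1, hence I1 == I2 == 0, imm10 == 1 and
	       imm11 == 0, recovering an addend of +0x1000. */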
7247
7248 if (r_type == R_ARM_THM_XPC22)
7249 {
7250 /* Check for Thumb to Thumb call. */
7251 /* FIXME: Should we translate the instruction into a BL
7252 instruction instead? */
7253 if (sym_flags == STT_ARM_TFUNC)
7254 (*_bfd_error_handler)
7255 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7256 input_bfd,
7257 h ? h->root.root.string : "(local)");
7258 }
7259 else
7260 {
7261 /* If it is not a call to Thumb, assume call to Arm.
7262 If it is a call relative to a section name, then it is not a
7263 function call at all, but rather a long jump. Calls through
7264 the PLT do not require stubs. */
7265 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7266 && (h == NULL || splt == NULL
7267 || h->plt.offset == (bfd_vma) -1))
7268 {
7269 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7270 {
7271 /* Convert BL to BLX. */
7272 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7273 }
7274 else if (( r_type != R_ARM_THM_CALL)
7275 && (r_type != R_ARM_THM_JUMP24))
7276 {
7277 if (elf32_thumb_to_arm_stub
7278 (info, sym_name, input_bfd, output_bfd, input_section,
7279 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7280 error_message))
7281 return bfd_reloc_ok;
7282 else
7283 return bfd_reloc_dangerous;
7284 }
7285 }
7286 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7287 && r_type == R_ARM_THM_CALL)
7288 {
7289 /* Make sure this is a BL. */
7290 lower_insn |= 0x1800;
7291 }
7292 }
7293
7294 /* Handle calls via the PLT. */
7295 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7296 {
7297 value = (splt->output_section->vma
7298 + splt->output_offset
7299 + h->plt.offset);
7300 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7301 {
7302 /* If the Thumb BLX instruction is available, convert the
7303 BL to a BLX instruction to call the ARM-mode PLT entry. */
7304 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7305 }
7306 else
7307 /* Target the Thumb stub before the ARM PLT entry. */
7308 value -= PLT_THUMB_STUB_SIZE;
7309 *unresolved_reloc_p = FALSE;
7310 }
7311
7312 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7313 {
7314 /* Check if a stub has to be inserted because the destination
7315 is too far. */
7316 bfd_vma from;
7317 bfd_signed_vma branch_offset;
7318 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7319
7320 from = (input_section->output_section->vma
7321 + input_section->output_offset
7322 + rel->r_offset);
7323 branch_offset = (bfd_signed_vma)(value - from);
7324
7325 if ((!thumb2
7326 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7327 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7328 ||
7329 (thumb2
7330 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7331 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7332 || ((sym_flags != STT_ARM_TFUNC)
7333 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7334 || r_type == R_ARM_THM_JUMP24)))
7335 {
7336 /* The target is out of reach or we are changing modes, so
7337 redirect the branch to the local stub for this
7338 function. */
7339 stub_entry = elf32_arm_get_stub_entry (input_section,
7340 sym_sec, h,
7341 rel, globals);
7342 if (stub_entry != NULL)
7343 value = (stub_entry->stub_offset
7344 + stub_entry->stub_sec->output_offset
7345 + stub_entry->stub_sec->output_section->vma);
7346
7347 /* If this call becomes a call to Arm, force BLX. */
7348 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7349 {
7350 if ((stub_entry
7351 && !arm_stub_is_thumb (stub_entry->stub_type))
7352 || (sym_flags != STT_ARM_TFUNC))
7353 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7354 }
7355 }
7356 }
7357
7358 relocation = value + signed_addend;
7359
7360 relocation -= (input_section->output_section->vma
7361 + input_section->output_offset
7362 + rel->r_offset);
7363
7364 check = relocation >> howto->rightshift;
7365
7366 /* If this is a signed value, the rightshift just dropped
7367 leading 1 bits (assuming twos complement). */
7368 if ((bfd_signed_vma) relocation >= 0)
7369 signed_check = check;
7370 else
7371 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
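   /* For illustration, on a 32-bit bfd_vma with rightshift == 1: a
      relocation of 0xfffffffa (-6) gives check == 0x7ffffffd, and OR-ing in
      ~(0xffffffff >> 1) == 0x80000000 restores the dropped sign bit, so
      signed_check becomes 0xfffffffd (-3), matching an arithmetic shift.  */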
7372
7373 /* Calculate the permissible maximum and minimum values for
7374 this relocation according to whether we're relocating for
7375 Thumb-2 or not. */
7376 bitsize = howto->bitsize;
7377 if (!thumb2)
7378 bitsize -= 2;
7379 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7380 reloc_signed_min = ~reloc_signed_max;
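   /* For reference, the Thumb instruction set allows BL a range of about
      +/-4MB and Thumb-2 BL/BLX about +/-16MB; the THM_MAX_*/THM2_MAX_*
      limits used for the stub decision above encode the same ranges.  */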
7381
7382 /* Assumes two's complement. */
7383 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7384 overflow = TRUE;
7385
7386 if ((lower_insn & 0x5000) == 0x4000)
7387 /* For a BLX instruction, make sure that the relocation is rounded up
7388 to a word boundary. This follows the semantics of the instruction
7389 which specifies that bit 1 of the target address will come from bit
7390 1 of the base address. */
7391 relocation = (relocation + 2) & ~ 3;
7392
7393 /* Put RELOCATION back into the insn. Assumes two's complement.
7394 We use the Thumb-2 encoding, which is safe even if dealing with
7395 a Thumb-1 instruction by virtue of our overflow check above. */
7396 reloc_sign = (signed_check < 0) ? 1 : 0;
7397 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7398 | ((relocation >> 12) & 0x3ff)
7399 | (reloc_sign << 10);
7400 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7401 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7402 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7403 | ((relocation >> 1) & 0x7ff);
7404
7405 /* Put the relocated value back in the object file: */
7406 bfd_put_16 (input_bfd, upper_insn, hit_data);
7407 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7408
7409 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7410 }
7411 break;
7412
7413 case R_ARM_THM_JUMP19:
7414 /* Thumb32 conditional branch instruction. */
7415 {
7416 bfd_vma relocation;
7417 bfd_boolean overflow = FALSE;
7418 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7419 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7420 bfd_signed_vma reloc_signed_max = 0xffffe;
7421 bfd_signed_vma reloc_signed_min = -0x100000;
7422 bfd_signed_vma signed_check;
7423
7424 /* Need to refetch the addend, reconstruct the top three bits,
7425 and squish the two 11 bit pieces together. */
7426 if (globals->use_rel)
7427 {
7428 bfd_vma S = (upper_insn & 0x0400) >> 10;
7429 bfd_vma upper = (upper_insn & 0x003f);
7430 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7431 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7432 bfd_vma lower = (lower_insn & 0x07ff);
7433
7434 upper |= J1 << 6;
7435 upper |= J2 << 7;
7436 upper |= (!S) << 8;
7437 upper -= 0x0100; /* Sign extend. */
7438
7439 addend = (upper << 12) | (lower << 1);
7440 signed_addend = addend;
7441 }
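   /* The field decoded above is the Thumb-2 conditional branch (B<cond>.W,
      encoding T3) immediate, S:J2:J1:imm6:imm11:0 -- a 21-bit signed byte
      offset, which is why reloc_signed_max/min above correspond to a
      +/-1MB range.  */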
7442
7443 /* Handle calls via the PLT. */
7444 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7445 {
7446 value = (splt->output_section->vma
7447 + splt->output_offset
7448 + h->plt.offset);
7449 /* Target the Thumb stub before the ARM PLT entry. */
7450 value -= PLT_THUMB_STUB_SIZE;
7451 *unresolved_reloc_p = FALSE;
7452 }
7453
7454 /* ??? Should handle interworking? GCC might someday try to
7455 use this for tail calls. */
7456
7457 relocation = value + signed_addend;
7458 relocation -= (input_section->output_section->vma
7459 + input_section->output_offset
7460 + rel->r_offset);
7461 signed_check = (bfd_signed_vma) relocation;
7462
7463 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7464 overflow = TRUE;
7465
7466 /* Put RELOCATION back into the insn. */
7467 {
7468 bfd_vma S = (relocation & 0x00100000) >> 20;
7469 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7470 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7471 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7472 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7473
7474 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7475 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7476 }
7477
7478 /* Put the relocated value back in the object file: */
7479 bfd_put_16 (input_bfd, upper_insn, hit_data);
7480 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7481
7482 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7483 }
7484
7485 case R_ARM_THM_JUMP11:
7486 case R_ARM_THM_JUMP8:
7487 case R_ARM_THM_JUMP6:
7488 /* Thumb B (branch) instruction. */
7489 {
7490 bfd_signed_vma relocation;
7491 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7492 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7493 bfd_signed_vma signed_check;
7494
7495 /* CZB cannot jump backward. */
7496 if (r_type == R_ARM_THM_JUMP6)
7497 reloc_signed_min = 0;
7498
7499 if (globals->use_rel)
7500 {
7501 /* Need to refetch addend. */
7502 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7503 if (addend & ((howto->src_mask + 1) >> 1))
7504 {
7505 signed_addend = -1;
7506 signed_addend &= ~ howto->src_mask;
7507 signed_addend |= addend;
7508 }
7509 else
7510 signed_addend = addend;
7511 /* The value in the insn has been right shifted. We need to
7512 undo this, so that we can perform the address calculation
7513 in terms of bytes. */
7514 signed_addend <<= howto->rightshift;
7515 }
7516 relocation = value + signed_addend;
7517
7518 relocation -= (input_section->output_section->vma
7519 + input_section->output_offset
7520 + rel->r_offset);
7521
7522 relocation >>= howto->rightshift;
7523 signed_check = relocation;
7524
7525 if (r_type == R_ARM_THM_JUMP6)
7526 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7527 else
7528 relocation &= howto->dst_mask;
7529 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7530
7531 bfd_put_16 (input_bfd, relocation, hit_data);
7532
7533 /* Assumes two's complement. */
7534 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7535 return bfd_reloc_overflow;
7536
7537 return bfd_reloc_ok;
7538 }
7539
7540 case R_ARM_ALU_PCREL7_0:
7541 case R_ARM_ALU_PCREL15_8:
7542 case R_ARM_ALU_PCREL23_15:
7543 {
7544 bfd_vma insn;
7545 bfd_vma relocation;
7546
7547 insn = bfd_get_32 (input_bfd, hit_data);
7548 if (globals->use_rel)
7549 {
7550 /* Extract the addend. */
7551 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7552 signed_addend = addend;
7553 }
7554 relocation = value + signed_addend;
7555
7556 relocation -= (input_section->output_section->vma
7557 + input_section->output_offset
7558 + rel->r_offset);
7559 insn = (insn & ~0xfff)
7560 | ((howto->bitpos << 7) & 0xf00)
7561 | ((relocation >> howto->bitpos) & 0xff);
7562 bfd_put_32 (input_bfd, insn, hit_data);
7563 }
7564 return bfd_reloc_ok;
7565
7566 case R_ARM_GNU_VTINHERIT:
7567 case R_ARM_GNU_VTENTRY:
7568 return bfd_reloc_ok;
7569
7570 case R_ARM_GOTOFF32:
7571 /* Relocation is relative to the start of the
7572 global offset table. */
7573
7574 BFD_ASSERT (sgot != NULL);
7575 if (sgot == NULL)
7576 return bfd_reloc_notsupported;
7577
7578 /* If we are addressing a Thumb function, we need to adjust the
7579 address by one, so that attempts to call the function pointer will
7580 correctly interpret it as Thumb code. */
7581 if (sym_flags == STT_ARM_TFUNC)
7582 value += 1;
7583
7584 /* Note that sgot->output_offset is not involved in this
7585 calculation. We always want the start of .got. If we
7586 define _GLOBAL_OFFSET_TABLE in a different way, as is
7587 permitted by the ABI, we might have to change this
7588 calculation. */
7589 value -= sgot->output_section->vma;
7590 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7591 contents, rel->r_offset, value,
7592 rel->r_addend);
7593
7594 case R_ARM_GOTPC:
7595 /* Use global offset table as symbol value. */
7596 BFD_ASSERT (sgot != NULL);
7597
7598 if (sgot == NULL)
7599 return bfd_reloc_notsupported;
7600
7601 *unresolved_reloc_p = FALSE;
7602 value = sgot->output_section->vma;
7603 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7604 contents, rel->r_offset, value,
7605 rel->r_addend);
7606
7607 case R_ARM_GOT32:
7608 case R_ARM_GOT_PREL:
7609 /* Relocation is to the entry for this symbol in the
7610 global offset table. */
7611 if (sgot == NULL)
7612 return bfd_reloc_notsupported;
7613
7614 if (h != NULL)
7615 {
7616 bfd_vma off;
7617 bfd_boolean dyn;
7618
7619 off = h->got.offset;
7620 BFD_ASSERT (off != (bfd_vma) -1);
7621 dyn = globals->root.dynamic_sections_created;
7622
7623 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7624 || (info->shared
7625 && SYMBOL_REFERENCES_LOCAL (info, h))
7626 || (ELF_ST_VISIBILITY (h->other)
7627 && h->root.type == bfd_link_hash_undefweak))
7628 {
7629 /* This is actually a static link, or it is a -Bsymbolic link
7630 and the symbol is defined locally. We must initialize this
7631 entry in the global offset table. Since the offset must
7632 always be a multiple of 4, we use the least significant bit
7633 to record whether we have initialized it already.
7634
7635 When doing a dynamic link, we create a .rel(a).got relocation
7636 entry to initialize the value. This is done in the
7637 finish_dynamic_symbol routine. */
7638 if ((off & 1) != 0)
7639 off &= ~1;
7640 else
7641 {
7642 /* If we are addressing a Thumb function, we need to
7643 adjust the address by one, so that attempts to
7644 call the function pointer will correctly
7645 interpret it as Thumb code. */
7646 if (sym_flags == STT_ARM_TFUNC)
7647 value |= 1;
7648
7649 bfd_put_32 (output_bfd, value, sgot->contents + off);
7650 h->got.offset |= 1;
7651 }
7652 }
7653 else
7654 *unresolved_reloc_p = FALSE;
7655
7656 value = sgot->output_offset + off;
7657 }
7658 else
7659 {
7660 bfd_vma off;
7661
7662 BFD_ASSERT (local_got_offsets != NULL &&
7663 local_got_offsets[r_symndx] != (bfd_vma) -1);
7664
7665 off = local_got_offsets[r_symndx];
7666
7667 /* The offset must always be a multiple of 4. We use the
7668 least significant bit to record whether we have already
7669 generated the necessary reloc. */
7670 if ((off & 1) != 0)
7671 off &= ~1;
7672 else
7673 {
7674 /* If we are addressing a Thumb function, we need to
7675 adjust the address by one, so that attempts to
7676 call the function pointer will correctly
7677 interpret it as Thumb code. */
7678 if (sym_flags == STT_ARM_TFUNC)
7679 value |= 1;
7680
7681 if (globals->use_rel)
7682 bfd_put_32 (output_bfd, value, sgot->contents + off);
7683
7684 if (info->shared)
7685 {
7686 asection * srelgot;
7687 Elf_Internal_Rela outrel;
7688 bfd_byte *loc;
7689
7690 srelgot = (bfd_get_section_by_name
7691 (dynobj, RELOC_SECTION (globals, ".got")));
7692 BFD_ASSERT (srelgot != NULL);
7693
7694 outrel.r_addend = addend + value;
7695 outrel.r_offset = (sgot->output_section->vma
7696 + sgot->output_offset
7697 + off);
7698 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7699 loc = srelgot->contents;
7700 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7701 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7702 }
7703
7704 local_got_offsets[r_symndx] |= 1;
7705 }
7706
7707 value = sgot->output_offset + off;
7708 }
7709 if (r_type != R_ARM_GOT32)
7710 value += sgot->output_section->vma;
7711
7712 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7713 contents, rel->r_offset, value,
7714 rel->r_addend);
7715
7716 case R_ARM_TLS_LDO32:
7717 value = value - dtpoff_base (info);
7718
7719 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7720 contents, rel->r_offset, value,
7721 rel->r_addend);
7722
7723 case R_ARM_TLS_LDM32:
7724 {
7725 bfd_vma off;
7726
7727 if (globals->sgot == NULL)
7728 abort ();
7729
7730 off = globals->tls_ldm_got.offset;
7731
7732 if ((off & 1) != 0)
7733 off &= ~1;
7734 else
7735 {
7736 /* If we don't know the module number, create a relocation
7737 for it. */
7738 if (info->shared)
7739 {
7740 Elf_Internal_Rela outrel;
7741 bfd_byte *loc;
7742
7743 if (globals->srelgot == NULL)
7744 abort ();
7745
7746 outrel.r_addend = 0;
7747 outrel.r_offset = (globals->sgot->output_section->vma
7748 + globals->sgot->output_offset + off);
7749 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7750
7751 if (globals->use_rel)
7752 bfd_put_32 (output_bfd, outrel.r_addend,
7753 globals->sgot->contents + off);
7754
7755 loc = globals->srelgot->contents;
7756 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7757 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7758 }
7759 else
7760 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7761
7762 globals->tls_ldm_got.offset |= 1;
7763 }
7764
7765 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7766 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7767
7768 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7769 contents, rel->r_offset, value,
7770 rel->r_addend);
7771 }
7772
7773 case R_ARM_TLS_GD32:
7774 case R_ARM_TLS_IE32:
7775 {
7776 bfd_vma off;
7777 int indx;
7778 char tls_type;
7779
7780 if (globals->sgot == NULL)
7781 abort ();
7782
7783 indx = 0;
7784 if (h != NULL)
7785 {
7786 bfd_boolean dyn;
7787 dyn = globals->root.dynamic_sections_created;
7788 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7789 && (!info->shared
7790 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7791 {
7792 *unresolved_reloc_p = FALSE;
7793 indx = h->dynindx;
7794 }
7795 off = h->got.offset;
7796 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7797 }
7798 else
7799 {
7800 if (local_got_offsets == NULL)
7801 abort ();
7802 off = local_got_offsets[r_symndx];
7803 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7804 }
7805
7806 if (tls_type == GOT_UNKNOWN)
7807 abort ();
7808
7809 if ((off & 1) != 0)
7810 off &= ~1;
7811 else
7812 {
7813 bfd_boolean need_relocs = FALSE;
7814 Elf_Internal_Rela outrel;
7815 bfd_byte *loc = NULL;
7816 int cur_off = off;
7817
7818 /* The GOT entries have not been initialized yet. Do it
7819 now, and emit any relocations. If both an IE GOT and a
7820 GD GOT are necessary, we emit the GD first. */
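   /* A GD reference uses two consecutive GOT words (module id and
      DTP-relative offset, described by R_ARM_TLS_DTPMOD32 and
      R_ARM_TLS_DTPOFF32 when relocations are emitted), whereas an IE
      reference uses a single word (TP-relative offset, R_ARM_TLS_TPOFF32);
      hence the cur_off += 8 and cur_off += 4 steps below.  */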
7821
7822 if ((info->shared || indx != 0)
7823 && (h == NULL
7824 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7825 || h->root.type != bfd_link_hash_undefweak))
7826 {
7827 need_relocs = TRUE;
7828 if (globals->srelgot == NULL)
7829 abort ();
7830 loc = globals->srelgot->contents;
7831 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7832 }
7833
7834 if (tls_type & GOT_TLS_GD)
7835 {
7836 if (need_relocs)
7837 {
7838 outrel.r_addend = 0;
7839 outrel.r_offset = (globals->sgot->output_section->vma
7840 + globals->sgot->output_offset
7841 + cur_off);
7842 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7843
7844 if (globals->use_rel)
7845 bfd_put_32 (output_bfd, outrel.r_addend,
7846 globals->sgot->contents + cur_off);
7847
7848 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7849 globals->srelgot->reloc_count++;
7850 loc += RELOC_SIZE (globals);
7851
7852 if (indx == 0)
7853 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7854 globals->sgot->contents + cur_off + 4);
7855 else
7856 {
7857 outrel.r_addend = 0;
7858 outrel.r_info = ELF32_R_INFO (indx,
7859 R_ARM_TLS_DTPOFF32);
7860 outrel.r_offset += 4;
7861
7862 if (globals->use_rel)
7863 bfd_put_32 (output_bfd, outrel.r_addend,
7864 globals->sgot->contents + cur_off + 4);
7865
7866
7867 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7868 globals->srelgot->reloc_count++;
7869 loc += RELOC_SIZE (globals);
7870 }
7871 }
7872 else
7873 {
7874 /* If we are not emitting relocations for a
7875 general dynamic reference, then we must be in a
7876 static link or an executable link with the
7877 symbol binding locally. Mark it as belonging
7878 to module 1, the executable. */
7879 bfd_put_32 (output_bfd, 1,
7880 globals->sgot->contents + cur_off);
7881 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7882 globals->sgot->contents + cur_off + 4);
7883 }
7884
7885 cur_off += 8;
7886 }
7887
7888 if (tls_type & GOT_TLS_IE)
7889 {
7890 if (need_relocs)
7891 {
7892 if (indx == 0)
7893 outrel.r_addend = value - dtpoff_base (info);
7894 else
7895 outrel.r_addend = 0;
7896 outrel.r_offset = (globals->sgot->output_section->vma
7897 + globals->sgot->output_offset
7898 + cur_off);
7899 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7900
7901 if (globals->use_rel)
7902 bfd_put_32 (output_bfd, outrel.r_addend,
7903 globals->sgot->contents + cur_off);
7904
7905 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7906 globals->srelgot->reloc_count++;
7907 loc += RELOC_SIZE (globals);
7908 }
7909 else
7910 bfd_put_32 (output_bfd, tpoff (info, value),
7911 globals->sgot->contents + cur_off);
7912 cur_off += 4;
7913 }
7914
7915 if (h != NULL)
7916 h->got.offset |= 1;
7917 else
7918 local_got_offsets[r_symndx] |= 1;
7919 }
7920
7921 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7922 off += 8;
7923 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7924 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7925
7926 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7927 contents, rel->r_offset, value,
7928 rel->r_addend);
7929 }
7930
7931 case R_ARM_TLS_LE32:
7932 if (info->shared)
7933 {
7934 (*_bfd_error_handler)
7935 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7936 input_bfd, input_section,
7937 (long) rel->r_offset);
7938 return FALSE;
7939 }
7940 else
7941 value = tpoff (info, value);
7942
7943 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7944 contents, rel->r_offset, value,
7945 rel->r_addend);
7946
7947 case R_ARM_V4BX:
7948 if (globals->fix_v4bx)
7949 {
7950 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7951
7952 /* Ensure that we have a BX instruction. */
7953 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
7954
7955 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
7956 {
7957 /* Branch to veneer. */
7958 bfd_vma glue_addr;
7959 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
7960 glue_addr -= input_section->output_section->vma
7961 + input_section->output_offset
7962 + rel->r_offset + 8;
7963 insn = (insn & 0xf0000000) | 0x0a000000
7964 | ((glue_addr >> 2) & 0x00ffffff);
7965 }
7966 else
7967 {
7968 /* Preserve Rm (lowest four bits) and the condition code
7969 (highest four bits). Other bits encode MOV PC,Rm. */
7970 insn = (insn & 0xf000000f) | 0x01a0f000;
7971 }
7972
7973 bfd_put_32 (input_bfd, insn, hit_data);
7974 }
7975 return bfd_reloc_ok;
7976
7977 case R_ARM_MOVW_ABS_NC:
7978 case R_ARM_MOVT_ABS:
7979 case R_ARM_MOVW_PREL_NC:
7980 case R_ARM_MOVT_PREL:
7981 /* Until we properly support segment-base-relative addressing then
7982 we assume the segment base to be zero, as for the group relocations.
7983 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
7984 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
7985 case R_ARM_MOVW_BREL_NC:
7986 case R_ARM_MOVW_BREL:
7987 case R_ARM_MOVT_BREL:
7988 {
7989 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7990
7991 if (globals->use_rel)
7992 {
7993 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7994 signed_addend = (addend ^ 0x8000) - 0x8000;
7995 }
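   /* The "(addend ^ 0x8000) - 0x8000" idiom sign-extends a 16-bit value:
      for example 0xfffe becomes (0x7ffe - 0x8000) == -2, while 0x1234
      becomes (0x9234 - 0x8000) == 0x1234, i.e. positive values are left
      unchanged.  */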
7996
7997 value += signed_addend;
7998
7999 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8000 value -= (input_section->output_section->vma
8001 + input_section->output_offset + rel->r_offset);
8002
8003 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8004 return bfd_reloc_overflow;
8005
8006 if (sym_flags == STT_ARM_TFUNC)
8007 value |= 1;
8008
8009 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8010 || r_type == R_ARM_MOVT_BREL)
8011 value >>= 16;
8012
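   /* The ARM MOVW/MOVT encoding splits its 16-bit immediate into imm4
      (instruction bits 19:16) and imm12 (bits 11:0); the two ORs below
      place each half accordingly.  */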
8013 insn &= 0xfff0f000;
8014 insn |= value & 0xfff;
8015 insn |= (value & 0xf000) << 4;
8016 bfd_put_32 (input_bfd, insn, hit_data);
8017 }
8018 return bfd_reloc_ok;
8019
8020 case R_ARM_THM_MOVW_ABS_NC:
8021 case R_ARM_THM_MOVT_ABS:
8022 case R_ARM_THM_MOVW_PREL_NC:
8023 case R_ARM_THM_MOVT_PREL:
8024 /* Until we properly support segment-base-relative addressing then
8025 we assume the segment base to be zero, as for the above relocations.
8026 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8027 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8028 as R_ARM_THM_MOVT_ABS. */
8029 case R_ARM_THM_MOVW_BREL_NC:
8030 case R_ARM_THM_MOVW_BREL:
8031 case R_ARM_THM_MOVT_BREL:
8032 {
8033 bfd_vma insn;
8034
8035 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8036 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8037
8038 if (globals->use_rel)
8039 {
8040 addend = ((insn >> 4) & 0xf000)
8041 | ((insn >> 15) & 0x0800)
8042 | ((insn >> 4) & 0x0700)
8043 | (insn & 0x00ff);
8044 signed_addend = (addend ^ 0x8000) - 0x8000;
8045 }
8046
8047 value += signed_addend;
8048
8049 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8050 value -= (input_section->output_section->vma
8051 + input_section->output_offset + rel->r_offset);
8052
8053 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8054 return bfd_reloc_overflow;
8055
8056 if (sym_flags == STT_ARM_TFUNC)
8057 value |= 1;
8058
8059 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8060 || r_type == R_ARM_THM_MOVT_BREL)
8061 value >>= 16;
8062
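   /* The Thumb-2 MOVW/MOVT immediate is split across imm4 (bits 19:16 of
      the combined encoding), i (bit 26), imm3 (bits 14:12) and imm8
      (bits 7:0); the shifts below place the 16-bit value, viewed as
      imm4:i:imm3:imm8, into those fields.  */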
8063 insn &= 0xfbf08f00;
8064 insn |= (value & 0xf000) << 4;
8065 insn |= (value & 0x0800) << 15;
8066 insn |= (value & 0x0700) << 4;
8067 insn |= (value & 0x00ff);
8068
8069 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8070 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8071 }
8072 return bfd_reloc_ok;
8073
8074 case R_ARM_ALU_PC_G0_NC:
8075 case R_ARM_ALU_PC_G1_NC:
8076 case R_ARM_ALU_PC_G0:
8077 case R_ARM_ALU_PC_G1:
8078 case R_ARM_ALU_PC_G2:
8079 case R_ARM_ALU_SB_G0_NC:
8080 case R_ARM_ALU_SB_G1_NC:
8081 case R_ARM_ALU_SB_G0:
8082 case R_ARM_ALU_SB_G1:
8083 case R_ARM_ALU_SB_G2:
8084 {
8085 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8086 bfd_vma pc = input_section->output_section->vma
8087 + input_section->output_offset + rel->r_offset;
8088 /* sb should be the origin of the *segment* containing the symbol.
8089 It is not clear how to obtain this OS-dependent value, so we
8090 make an arbitrary choice of zero. */
8091 bfd_vma sb = 0;
8092 bfd_vma residual;
8093 bfd_vma g_n;
8094 bfd_signed_vma signed_value;
8095 int group = 0;
8096
8097 /* Determine which group of bits to select. */
8098 switch (r_type)
8099 {
8100 case R_ARM_ALU_PC_G0_NC:
8101 case R_ARM_ALU_PC_G0:
8102 case R_ARM_ALU_SB_G0_NC:
8103 case R_ARM_ALU_SB_G0:
8104 group = 0;
8105 break;
8106
8107 case R_ARM_ALU_PC_G1_NC:
8108 case R_ARM_ALU_PC_G1:
8109 case R_ARM_ALU_SB_G1_NC:
8110 case R_ARM_ALU_SB_G1:
8111 group = 1;
8112 break;
8113
8114 case R_ARM_ALU_PC_G2:
8115 case R_ARM_ALU_SB_G2:
8116 group = 2;
8117 break;
8118
8119 default:
8120 abort ();
8121 }
8122
8123 /* If REL, extract the addend from the insn. If RELA, it will
8124 have already been fetched for us. */
8125 if (globals->use_rel)
8126 {
8127 int negative;
8128 bfd_vma constant = insn & 0xff;
8129 bfd_vma rotation = (insn & 0xf00) >> 8;
8130
8131 if (rotation == 0)
8132 signed_addend = constant;
8133 else
8134 {
8135 /* Compensate for the fact that in the instruction, the
8136 rotation is stored in multiples of 2 bits. */
8137 rotation *= 2;
8138
8139 /* Rotate "constant" right by "rotation" bits. */
8140 signed_addend = (constant >> rotation) |
8141 (constant << (8 * sizeof (bfd_vma) - rotation));
8142 }
8143
8144 /* Determine if the instruction is an ADD or a SUB.
8145 (For REL, this determines the sign of the addend.) */
8146 negative = identify_add_or_sub (insn);
8147 if (negative == 0)
8148 {
8149 (*_bfd_error_handler)
8150 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8151 input_bfd, input_section,
8152 (long) rel->r_offset, howto->name);
8153 return bfd_reloc_overflow;
8154 }
8155
8156 signed_addend *= negative;
8157 }
8158
8159 /* Compute the value (X) to go in the place. */
8160 if (r_type == R_ARM_ALU_PC_G0_NC
8161 || r_type == R_ARM_ALU_PC_G1_NC
8162 || r_type == R_ARM_ALU_PC_G0
8163 || r_type == R_ARM_ALU_PC_G1
8164 || r_type == R_ARM_ALU_PC_G2)
8165 /* PC relative. */
8166 signed_value = value - pc + signed_addend;
8167 else
8168 /* Section base relative. */
8169 signed_value = value - sb + signed_addend;
8170
8171 /* If the target symbol is a Thumb function, then set the
8172 Thumb bit in the address. */
8173 if (sym_flags == STT_ARM_TFUNC)
8174 signed_value |= 1;
8175
8176 /* Calculate the value of the relevant G_n, in encoded
8177 constant-with-rotation format. */
8178 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8179 &residual);
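   /* As a worked example (assuming the usual AAELF group-splitting rules):
      for a value of 0x1234, group 0 takes the most significant 8-bit chunk
      on an even bit boundary, giving g_0 == 0x1200 with residual 0x34;
      group 1 would then take 0x34, leaving no residual.  */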
8180
8181 /* Check for overflow if required. */
8182 if ((r_type == R_ARM_ALU_PC_G0
8183 || r_type == R_ARM_ALU_PC_G1
8184 || r_type == R_ARM_ALU_PC_G2
8185 || r_type == R_ARM_ALU_SB_G0
8186 || r_type == R_ARM_ALU_SB_G1
8187 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8188 {
8189 (*_bfd_error_handler)
8190 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8191 input_bfd, input_section,
8192 (long) rel->r_offset, abs (signed_value), howto->name);
8193 return bfd_reloc_overflow;
8194 }
8195
8196 /* Mask out the value and the ADD/SUB part of the opcode; take care
8197 not to destroy the S bit. */
8198 insn &= 0xff1ff000;
8199
8200 /* Set the opcode according to whether the value to go in the
8201 place is negative. */
8202 if (signed_value < 0)
8203 insn |= 1 << 22;
8204 else
8205 insn |= 1 << 23;
8206
8207 /* Encode the offset. */
8208 insn |= g_n;
8209
8210 bfd_put_32 (input_bfd, insn, hit_data);
8211 }
8212 return bfd_reloc_ok;
8213
8214 case R_ARM_LDR_PC_G0:
8215 case R_ARM_LDR_PC_G1:
8216 case R_ARM_LDR_PC_G2:
8217 case R_ARM_LDR_SB_G0:
8218 case R_ARM_LDR_SB_G1:
8219 case R_ARM_LDR_SB_G2:
8220 {
8221 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8222 bfd_vma pc = input_section->output_section->vma
8223 + input_section->output_offset + rel->r_offset;
8224 bfd_vma sb = 0; /* See note above. */
8225 bfd_vma residual;
8226 bfd_signed_vma signed_value;
8227 int group = 0;
8228
8229 /* Determine which groups of bits to calculate. */
8230 switch (r_type)
8231 {
8232 case R_ARM_LDR_PC_G0:
8233 case R_ARM_LDR_SB_G0:
8234 group = 0;
8235 break;
8236
8237 case R_ARM_LDR_PC_G1:
8238 case R_ARM_LDR_SB_G1:
8239 group = 1;
8240 break;
8241
8242 case R_ARM_LDR_PC_G2:
8243 case R_ARM_LDR_SB_G2:
8244 group = 2;
8245 break;
8246
8247 default:
8248 abort ();
8249 }
8250
8251 /* If REL, extract the addend from the insn. If RELA, it will
8252 have already been fetched for us. */
8253 if (globals->use_rel)
8254 {
8255 int negative = (insn & (1 << 23)) ? 1 : -1;
8256 signed_addend = negative * (insn & 0xfff);
8257 }
8258
8259 /* Compute the value (X) to go in the place. */
8260 if (r_type == R_ARM_LDR_PC_G0
8261 || r_type == R_ARM_LDR_PC_G1
8262 || r_type == R_ARM_LDR_PC_G2)
8263 /* PC relative. */
8264 signed_value = value - pc + signed_addend;
8265 else
8266 /* Section base relative. */
8267 signed_value = value - sb + signed_addend;
8268
8269 /* Calculate the value of the relevant G_{n-1} to obtain
8270 the residual at that stage. */
8271 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8272
8273 /* Check for overflow. */
8274 if (residual >= 0x1000)
8275 {
8276 (*_bfd_error_handler)
8277 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8278 input_bfd, input_section,
8279 (long) rel->r_offset, abs (signed_value), howto->name);
8280 return bfd_reloc_overflow;
8281 }
8282
8283 /* Mask out the value and U bit. */
8284 insn &= 0xff7ff000;
8285
8286 /* Set the U bit if the value to go in the place is non-negative. */
8287 if (signed_value >= 0)
8288 insn |= 1 << 23;
8289
8290 /* Encode the offset. */
8291 insn |= residual;
8292
8293 bfd_put_32 (input_bfd, insn, hit_data);
8294 }
8295 return bfd_reloc_ok;
8296
8297 case R_ARM_LDRS_PC_G0:
8298 case R_ARM_LDRS_PC_G1:
8299 case R_ARM_LDRS_PC_G2:
8300 case R_ARM_LDRS_SB_G0:
8301 case R_ARM_LDRS_SB_G1:
8302 case R_ARM_LDRS_SB_G2:
8303 {
8304 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8305 bfd_vma pc = input_section->output_section->vma
8306 + input_section->output_offset + rel->r_offset;
8307 bfd_vma sb = 0; /* See note above. */
8308 bfd_vma residual;
8309 bfd_signed_vma signed_value;
8310 int group = 0;
8311
8312 /* Determine which groups of bits to calculate. */
8313 switch (r_type)
8314 {
8315 case R_ARM_LDRS_PC_G0:
8316 case R_ARM_LDRS_SB_G0:
8317 group = 0;
8318 break;
8319
8320 case R_ARM_LDRS_PC_G1:
8321 case R_ARM_LDRS_SB_G1:
8322 group = 1;
8323 break;
8324
8325 case R_ARM_LDRS_PC_G2:
8326 case R_ARM_LDRS_SB_G2:
8327 group = 2;
8328 break;
8329
8330 default:
8331 abort ();
8332 }
8333
8334 /* If REL, extract the addend from the insn. If RELA, it will
8335 have already been fetched for us. */
8336 if (globals->use_rel)
8337 {
8338 int negative = (insn & (1 << 23)) ? 1 : -1;
8339 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8340 }
8341
8342 /* Compute the value (X) to go in the place. */
8343 if (r_type == R_ARM_LDRS_PC_G0
8344 || r_type == R_ARM_LDRS_PC_G1
8345 || r_type == R_ARM_LDRS_PC_G2)
8346 /* PC relative. */
8347 signed_value = value - pc + signed_addend;
8348 else
8349 /* Section base relative. */
8350 signed_value = value - sb + signed_addend;
8351
8352 /* Calculate the value of the relevant G_{n-1} to obtain
8353 the residual at that stage. */
8354 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8355
8356 /* Check for overflow. */
8357 if (residual >= 0x100)
8358 {
8359 (*_bfd_error_handler)
8360 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8361 input_bfd, input_section,
8362 (long) rel->r_offset, abs (signed_value), howto->name);
8363 return bfd_reloc_overflow;
8364 }
8365
8366 /* Mask out the value and U bit. */
8367 insn &= 0xff7ff0f0;
8368
8369 /* Set the U bit if the value to go in the place is non-negative. */
8370 if (signed_value >= 0)
8371 insn |= 1 << 23;
8372
8373 /* Encode the offset. */
8374 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8375
8376 bfd_put_32 (input_bfd, insn, hit_data);
8377 }
8378 return bfd_reloc_ok;
8379
8380 case R_ARM_LDC_PC_G0:
8381 case R_ARM_LDC_PC_G1:
8382 case R_ARM_LDC_PC_G2:
8383 case R_ARM_LDC_SB_G0:
8384 case R_ARM_LDC_SB_G1:
8385 case R_ARM_LDC_SB_G2:
8386 {
8387 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8388 bfd_vma pc = input_section->output_section->vma
8389 + input_section->output_offset + rel->r_offset;
8390 bfd_vma sb = 0; /* See note above. */
8391 bfd_vma residual;
8392 bfd_signed_vma signed_value;
8393 int group = 0;
8394
8395 /* Determine which groups of bits to calculate. */
8396 switch (r_type)
8397 {
8398 case R_ARM_LDC_PC_G0:
8399 case R_ARM_LDC_SB_G0:
8400 group = 0;
8401 break;
8402
8403 case R_ARM_LDC_PC_G1:
8404 case R_ARM_LDC_SB_G1:
8405 group = 1;
8406 break;
8407
8408 case R_ARM_LDC_PC_G2:
8409 case R_ARM_LDC_SB_G2:
8410 group = 2;
8411 break;
8412
8413 default:
8414 abort ();
8415 }
8416
8417 /* If REL, extract the addend from the insn. If RELA, it will
8418 have already been fetched for us. */
8419 if (globals->use_rel)
8420 {
8421 int negative = (insn & (1 << 23)) ? 1 : -1;
8422 signed_addend = negative * ((insn & 0xff) << 2);
8423 }
8424
8425 /* Compute the value (X) to go in the place. */
8426 if (r_type == R_ARM_LDC_PC_G0
8427 || r_type == R_ARM_LDC_PC_G1
8428 || r_type == R_ARM_LDC_PC_G2)
8429 /* PC relative. */
8430 signed_value = value - pc + signed_addend;
8431 else
8432 /* Section base relative. */
8433 signed_value = value - sb + signed_addend;
8434
8435 /* Calculate the value of the relevant G_{n-1} to obtain
8436 the residual at that stage. */
8437 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8438
8439 /* Check for overflow. (The absolute value to go in the place must be
8440 divisible by four and, after having been divided by four, must
8441 fit in eight bits.) */
8442 if ((residual & 0x3) != 0 || residual >= 0x400)
8443 {
8444 (*_bfd_error_handler)
8445 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8446 input_bfd, input_section,
8447 (long) rel->r_offset, abs (signed_value), howto->name);
8448 return bfd_reloc_overflow;
8449 }
8450
8451 /* Mask out the value and U bit. */
8452 insn &= 0xff7fff00;
8453
8454 /* Set the U bit if the value to go in the place is non-negative. */
8455 if (signed_value >= 0)
8456 insn |= 1 << 23;
8457
8458 /* Encode the offset. */
8459 insn |= residual >> 2;
8460
8461 bfd_put_32 (input_bfd, insn, hit_data);
8462 }
8463 return bfd_reloc_ok;
8464
8465 default:
8466 return bfd_reloc_notsupported;
8467 }
8468 }
8469
8470 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8471 static void
8472 arm_add_to_rel (bfd * abfd,
8473 bfd_byte * address,
8474 reloc_howto_type * howto,
8475 bfd_signed_vma increment)
8476 {
8477 bfd_signed_vma addend;
8478
8479 if (howto->type == R_ARM_THM_CALL
8480 || howto->type == R_ARM_THM_JUMP24)
8481 {
8482 int upper_insn, lower_insn;
8483 int upper, lower;
8484
8485 upper_insn = bfd_get_16 (abfd, address);
8486 lower_insn = bfd_get_16 (abfd, address + 2);
8487 upper = upper_insn & 0x7ff;
8488 lower = lower_insn & 0x7ff;
8489
8490 addend = (upper << 12) | (lower << 1);
8491 addend += increment;
8492 addend >>= 1;
8493
8494 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8495 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8496
8497 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8498 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8499 }
8500 else
8501 {
8502 bfd_vma contents;
8503
8504 contents = bfd_get_32 (abfd, address);
8505
8506 /* Get the (signed) value from the instruction. */
8507 addend = contents & howto->src_mask;
8508 if (addend & ((howto->src_mask + 1) >> 1))
8509 {
8510 bfd_signed_vma mask;
8511
8512 mask = -1;
8513 mask &= ~ howto->src_mask;
8514 addend |= mask;
8515 }
8516
8517 /* Add in the increment, (which is a byte value). */
8518 switch (howto->type)
8519 {
8520 default:
8521 addend += increment;
8522 break;
8523
8524 case R_ARM_PC24:
8525 case R_ARM_PLT32:
8526 case R_ARM_CALL:
8527 case R_ARM_JUMP24:
8528 addend <<= howto->size;
8529 addend += increment;
8530
8531 /* Should we check for overflow here ? */
8532
8533 /* Drop any undesired bits. */
8534 addend >>= howto->rightshift;
8535 break;
8536 }
8537
8538 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8539
8540 bfd_put_32 (abfd, contents, address);
8541 }
8542 }
8543
8544 #define IS_ARM_TLS_RELOC(R_TYPE) \
8545 ((R_TYPE) == R_ARM_TLS_GD32 \
8546 || (R_TYPE) == R_ARM_TLS_LDO32 \
8547 || (R_TYPE) == R_ARM_TLS_LDM32 \
8548 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8549 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8550 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8551 || (R_TYPE) == R_ARM_TLS_LE32 \
8552 || (R_TYPE) == R_ARM_TLS_IE32)
8553
8554 /* Relocate an ARM ELF section. */
8555
8556 static bfd_boolean
8557 elf32_arm_relocate_section (bfd * output_bfd,
8558 struct bfd_link_info * info,
8559 bfd * input_bfd,
8560 asection * input_section,
8561 bfd_byte * contents,
8562 Elf_Internal_Rela * relocs,
8563 Elf_Internal_Sym * local_syms,
8564 asection ** local_sections)
8565 {
8566 Elf_Internal_Shdr *symtab_hdr;
8567 struct elf_link_hash_entry **sym_hashes;
8568 Elf_Internal_Rela *rel;
8569 Elf_Internal_Rela *relend;
8570 const char *name;
8571 struct elf32_arm_link_hash_table * globals;
8572
8573 globals = elf32_arm_hash_table (info);
8574
8575 symtab_hdr = & elf_symtab_hdr (input_bfd);
8576 sym_hashes = elf_sym_hashes (input_bfd);
8577
8578 rel = relocs;
8579 relend = relocs + input_section->reloc_count;
8580 for (; rel < relend; rel++)
8581 {
8582 int r_type;
8583 reloc_howto_type * howto;
8584 unsigned long r_symndx;
8585 Elf_Internal_Sym * sym;
8586 asection * sec;
8587 struct elf_link_hash_entry * h;
8588 bfd_vma relocation;
8589 bfd_reloc_status_type r;
8590 arelent bfd_reloc;
8591 char sym_type;
8592 bfd_boolean unresolved_reloc = FALSE;
8593 char *error_message = NULL;
8594
8595 r_symndx = ELF32_R_SYM (rel->r_info);
8596 r_type = ELF32_R_TYPE (rel->r_info);
8597 r_type = arm_real_reloc_type (globals, r_type);
8598
8599 if ( r_type == R_ARM_GNU_VTENTRY
8600 || r_type == R_ARM_GNU_VTINHERIT)
8601 continue;
8602
8603 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8604 howto = bfd_reloc.howto;
8605
8606 h = NULL;
8607 sym = NULL;
8608 sec = NULL;
8609
8610 if (r_symndx < symtab_hdr->sh_info)
8611 {
8612 sym = local_syms + r_symndx;
8613 sym_type = ELF32_ST_TYPE (sym->st_info);
8614 sec = local_sections[r_symndx];
8615 if (globals->use_rel)
8616 {
8617 relocation = (sec->output_section->vma
8618 + sec->output_offset
8619 + sym->st_value);
8620 if (!info->relocatable
8621 && (sec->flags & SEC_MERGE)
8622 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8623 {
8624 asection *msec;
8625 bfd_vma addend, value;
8626
8627 switch (r_type)
8628 {
8629 case R_ARM_MOVW_ABS_NC:
8630 case R_ARM_MOVT_ABS:
8631 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8632 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8633 addend = (addend ^ 0x8000) - 0x8000;
8634 break;
8635
8636 case R_ARM_THM_MOVW_ABS_NC:
8637 case R_ARM_THM_MOVT_ABS:
8638 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8639 << 16;
8640 value |= bfd_get_16 (input_bfd,
8641 contents + rel->r_offset + 2);
8642 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8643 | ((value & 0x04000000) >> 15);
8644 addend = (addend ^ 0x8000) - 0x8000;
8645 break;
8646
8647 default:
8648 if (howto->rightshift
8649 || (howto->src_mask & (howto->src_mask + 1)))
8650 {
8651 (*_bfd_error_handler)
8652 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8653 input_bfd, input_section,
8654 (long) rel->r_offset, howto->name);
8655 return FALSE;
8656 }
8657
8658 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8659
8660 /* Get the (signed) value from the instruction. */
8661 addend = value & howto->src_mask;
8662 if (addend & ((howto->src_mask + 1) >> 1))
8663 {
8664 bfd_signed_vma mask;
8665
8666 mask = -1;
8667 mask &= ~ howto->src_mask;
8668 addend |= mask;
8669 }
8670 break;
8671 }
8672
8673 msec = sec;
8674 addend =
8675 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8676 - relocation;
8677 addend += msec->output_section->vma + msec->output_offset;
8678
8679 /* Cases here must match those in the preceding
8680 switch statement. */
8681 switch (r_type)
8682 {
8683 case R_ARM_MOVW_ABS_NC:
8684 case R_ARM_MOVT_ABS:
8685 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8686 | (addend & 0xfff);
8687 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8688 break;
8689
8690 case R_ARM_THM_MOVW_ABS_NC:
8691 case R_ARM_THM_MOVT_ABS:
8692 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8693 | (addend & 0xff) | ((addend & 0x0800) << 15);
8694 bfd_put_16 (input_bfd, value >> 16,
8695 contents + rel->r_offset);
8696 bfd_put_16 (input_bfd, value,
8697 contents + rel->r_offset + 2);
8698 break;
8699
8700 default:
8701 value = (value & ~ howto->dst_mask)
8702 | (addend & howto->dst_mask);
8703 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8704 break;
8705 }
8706 }
8707 }
8708 else
8709 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8710 }
8711 else
8712 {
8713 bfd_boolean warned;
8714
8715 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8716 r_symndx, symtab_hdr, sym_hashes,
8717 h, sec, relocation,
8718 unresolved_reloc, warned);
8719
8720 sym_type = h->type;
8721 }
8722
8723 if (sec != NULL && elf_discarded_section (sec))
8724 {
8725 /* For relocs against symbols from removed linkonce sections,
8726 or sections discarded by a linker script, we just want the
8727 section contents zeroed. Avoid any special processing. */
8728 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8729 rel->r_info = 0;
8730 rel->r_addend = 0;
8731 continue;
8732 }
8733
8734 if (info->relocatable)
8735 {
8736 /* This is a relocatable link. We don't have to change
8737 anything, unless the reloc is against a section symbol,
8738 in which case we have to adjust according to where the
8739 section symbol winds up in the output section. */
8740 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8741 {
8742 if (globals->use_rel)
8743 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8744 howto, (bfd_signed_vma) sec->output_offset);
8745 else
8746 rel->r_addend += sec->output_offset;
8747 }
8748 continue;
8749 }
8750
8751 if (h != NULL)
8752 name = h->root.root.string;
8753 else
8754 {
8755 name = (bfd_elf_string_from_elf_section
8756 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8757 if (name == NULL || *name == '\0')
8758 name = bfd_section_name (input_bfd, sec);
8759 }
8760
8761 if (r_symndx != 0
8762 && r_type != R_ARM_NONE
8763 && (h == NULL
8764 || h->root.type == bfd_link_hash_defined
8765 || h->root.type == bfd_link_hash_defweak)
8766 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8767 {
8768 (*_bfd_error_handler)
8769 ((sym_type == STT_TLS
8770 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8771 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8772 input_bfd,
8773 input_section,
8774 (long) rel->r_offset,
8775 howto->name,
8776 name);
8777 }
8778
8779 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8780 input_section, contents, rel,
8781 relocation, info, sec, name,
8782 (h ? ELF_ST_TYPE (h->type) :
8783 ELF_ST_TYPE (sym->st_info)), h,
8784 &unresolved_reloc, &error_message);
8785
8786 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8787 because such sections are not SEC_ALLOC and thus ld.so will
8788 not process them. */
8789 if (unresolved_reloc
8790 && !((input_section->flags & SEC_DEBUGGING) != 0
8791 && h->def_dynamic))
8792 {
8793 (*_bfd_error_handler)
8794 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8795 input_bfd,
8796 input_section,
8797 (long) rel->r_offset,
8798 howto->name,
8799 h->root.root.string);
8800 return FALSE;
8801 }
8802
8803 if (r != bfd_reloc_ok)
8804 {
8805 switch (r)
8806 {
8807 case bfd_reloc_overflow:
8808 /* If the overflowing reloc was to an undefined symbol,
8809 we have already printed one error message and there
8810 is no point complaining again. */
8811 if ((! h ||
8812 h->root.type != bfd_link_hash_undefined)
8813 && (!((*info->callbacks->reloc_overflow)
8814 (info, (h ? &h->root : NULL), name, howto->name,
8815 (bfd_vma) 0, input_bfd, input_section,
8816 rel->r_offset))))
8817 return FALSE;
8818 break;
8819
8820 case bfd_reloc_undefined:
8821 if (!((*info->callbacks->undefined_symbol)
8822 (info, name, input_bfd, input_section,
8823 rel->r_offset, TRUE)))
8824 return FALSE;
8825 break;
8826
8827 case bfd_reloc_outofrange:
8828 error_message = _("out of range");
8829 goto common_error;
8830
8831 case bfd_reloc_notsupported:
8832 error_message = _("unsupported relocation");
8833 goto common_error;
8834
8835 case bfd_reloc_dangerous:
8836 /* error_message should already be set. */
8837 goto common_error;
8838
8839 default:
8840 error_message = _("unknown error");
8841 /* Fall through. */
8842
8843 common_error:
8844 BFD_ASSERT (error_message != NULL);
8845 if (!((*info->callbacks->reloc_dangerous)
8846 (info, error_message, input_bfd, input_section,
8847 rel->r_offset)))
8848 return FALSE;
8849 break;
8850 }
8851 }
8852 }
8853
8854 return TRUE;
8855 }
8856
8857 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8858 adds the edit to the start of the list. (The list must be built in order of
8859 ascending INDEX: the function's callers are primarily responsible for
8860 maintaining that condition). */
8861
8862 static void
8863 add_unwind_table_edit (arm_unwind_table_edit **head,
8864 arm_unwind_table_edit **tail,
8865 arm_unwind_edit_type type,
8866 asection *linked_section,
8867 unsigned int index)
8868 {
8869 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8870
8871 new_edit->type = type;
8872 new_edit->linked_section = linked_section;
8873 new_edit->index = index;
8874
8875 if (index > 0)
8876 {
8877 new_edit->next = NULL;
8878
8879 if (*tail)
8880 (*tail)->next = new_edit;
8881
8882 (*tail) = new_edit;
8883
8884 if (!*head)
8885 (*head) = new_edit;
8886 }
8887 else
8888 {
8889 new_edit->next = *head;
8890
8891 if (!*tail)
8892 *tail = new_edit;
8893
8894 *head = new_edit;
8895 }
8896 }
8897
8898 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8899
8900 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8901 static void
8902 adjust_exidx_size(asection *exidx_sec, int adjust)
8903 {
8904 asection *out_sec;
8905
8906 if (!exidx_sec->rawsize)
8907 exidx_sec->rawsize = exidx_sec->size;
8908
8909 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8910 out_sec = exidx_sec->output_section;
8911 /* Adjust size of output section. */
8912 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8913 }
8914
8915 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8916 static void
8917 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8918 {
8919 struct _arm_elf_section_data *exidx_arm_data;
8920
8921 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8922 add_unwind_table_edit (
8923 &exidx_arm_data->u.exidx.unwind_edit_list,
8924 &exidx_arm_data->u.exidx.unwind_edit_tail,
8925 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8926
8927 adjust_exidx_size(exidx_sec, 8);
8928 }
8929
8930 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8931 made to those tables, such that:
8932
8933 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8934 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8935 codes which have been inlined into the index).
8936
8937 The edits are applied when the tables are written
8938 (in elf32_arm_write_section).
8939 */
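/* Two kinds of edit are recorded below: DELETE_EXIDX_ENTRY, which drops an
   8-byte table entry made redundant by the entry before it, and
   INSERT_EXIDX_CANTUNWIND_AT_END, which appends an EXIDX_CANTUNWIND pair
   after a text section that has no unwind data of its own.  */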
8940
8941 bfd_boolean
8942 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8943 unsigned int num_text_sections,
8944 struct bfd_link_info *info)
8945 {
8946 bfd *inp;
8947 unsigned int last_second_word = 0, i;
8948 asection *last_exidx_sec = NULL;
8949 asection *last_text_sec = NULL;
8950 int last_unwind_type = -1;
8951
8952 /* Walk over all EXIDX sections, and create backlinks from the corresponding
8953 text sections. */
8954 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
8955 {
8956 asection *sec;
8957
8958 for (sec = inp->sections; sec != NULL; sec = sec->next)
8959 {
8960 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
8961 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
8962
8963 if (hdr->sh_type != SHT_ARM_EXIDX)
8964 continue;
8965
8966 if (elf_sec->linked_to)
8967 {
8968 Elf_Internal_Shdr *linked_hdr
8969 = &elf_section_data (elf_sec->linked_to)->this_hdr;
8970 struct _arm_elf_section_data *linked_sec_arm_data
8971 = get_arm_elf_section_data (linked_hdr->bfd_section);
8972
8973 if (linked_sec_arm_data == NULL)
8974 continue;
8975
8976 /* Link this .ARM.exidx section back from the text section it
8977 describes. */
8978 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
8979 }
8980 }
8981 }
8982
8983 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
8984 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
8985 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
8986 */
8987
8988 for (i = 0; i < num_text_sections; i++)
8989 {
8990 asection *sec = text_section_order[i];
8991 asection *exidx_sec;
8992 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
8993 struct _arm_elf_section_data *exidx_arm_data;
8994 bfd_byte *contents = NULL;
8995 int deleted_exidx_bytes = 0;
8996 bfd_vma j;
8997 arm_unwind_table_edit *unwind_edit_head = NULL;
8998 arm_unwind_table_edit *unwind_edit_tail = NULL;
8999 Elf_Internal_Shdr *hdr;
9000 bfd *ibfd;
9001
9002 if (arm_data == NULL)
9003 continue;
9004
9005 exidx_sec = arm_data->u.text.arm_exidx_sec;
9006 if (exidx_sec == NULL)
9007 {
9008 /* Section has no unwind data. */
9009 if (last_unwind_type == 0 || !last_exidx_sec)
9010 continue;
9011
9012 /* Ignore zero sized sections. */
9013 if (sec->size == 0)
9014 continue;
9015
9016 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9017 last_unwind_type = 0;
9018 continue;
9019 }
9020
9021 /* Skip /DISCARD/ sections. */
9022 if (bfd_is_abs_section (exidx_sec->output_section))
9023 continue;
9024
9025 hdr = &elf_section_data (exidx_sec)->this_hdr;
9026 if (hdr->sh_type != SHT_ARM_EXIDX)
9027 continue;
9028
9029 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9030 if (exidx_arm_data == NULL)
9031 continue;
9032
9033 ibfd = exidx_sec->owner;
9034
9035 if (hdr->contents != NULL)
9036 contents = hdr->contents;
9037 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9038 /* An error? */
9039 continue;
9040
9041 for (j = 0; j < hdr->sh_size; j += 8)
9042 {
9043 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9044 int unwind_type;
9045 int elide = 0;
9046
9047 /* An EXIDX_CANTUNWIND entry. */
9048 if (second_word == 1)
9049 {
9050 if (last_unwind_type == 0)
9051 elide = 1;
9052 unwind_type = 0;
9053 }
9054 /* Inlined unwinding data. Merge if equal to previous. */
9055 else if ((second_word & 0x80000000) != 0)
9056 {
9057 if (last_second_word == second_word && last_unwind_type == 1)
9058 elide = 1;
9059 unwind_type = 1;
9060 last_second_word = second_word;
9061 }
9062 /* Normal table entry. In theory we could merge these too,
9063 but duplicate entries are likely to be much less common. */
9064 else
9065 unwind_type = 2;
9066
9067 if (elide)
9068 {
9069 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9070 DELETE_EXIDX_ENTRY, NULL, j / 8);
9071
9072 deleted_exidx_bytes += 8;
9073 }
9074
9075 last_unwind_type = unwind_type;
9076 }
9077
9078 /* Free contents if we allocated it ourselves. */
9079 if (contents != hdr->contents)
9080 free (contents);
9081
9082 /* Record edits to be applied later (in elf32_arm_write_section). */
9083 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9084 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9085
9086 if (deleted_exidx_bytes > 0)
9087 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9088
9089 last_exidx_sec = exidx_sec;
9090 last_text_sec = sec;
9091 }
9092
9093 /* Add terminating CANTUNWIND entry. */
9094 if (last_exidx_sec && last_unwind_type != 0)
9095 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9096
9097 return TRUE;
9098 }
9099
9100 static bfd_boolean
9101 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9102 bfd *ibfd, const char *name)
9103 {
9104 asection *sec, *osec;
9105
9106 sec = bfd_get_section_by_name (ibfd, name);
9107 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9108 return TRUE;
9109
9110 osec = sec->output_section;
9111 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9112 return TRUE;
9113
9114 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9115 sec->output_offset, sec->size))
9116 return FALSE;
9117
9118 return TRUE;
9119 }
9120
9121 static bfd_boolean
9122 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9123 {
9124 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9125
9126 /* Invoke the regular ELF backend linker to do all the work. */
9127 if (!bfd_elf_final_link (abfd, info))
9128 return FALSE;
9129
9130 /* Write out any glue sections now that we have created all the
9131 stubs. */
9132 if (globals->bfd_of_glue_owner != NULL)
9133 {
9134 if (! elf32_arm_output_glue_section (info, abfd,
9135 globals->bfd_of_glue_owner,
9136 ARM2THUMB_GLUE_SECTION_NAME))
9137 return FALSE;
9138
9139 if (! elf32_arm_output_glue_section (info, abfd,
9140 globals->bfd_of_glue_owner,
9141 THUMB2ARM_GLUE_SECTION_NAME))
9142 return FALSE;
9143
9144 if (! elf32_arm_output_glue_section (info, abfd,
9145 globals->bfd_of_glue_owner,
9146 VFP11_ERRATUM_VENEER_SECTION_NAME))
9147 return FALSE;
9148
9149 if (! elf32_arm_output_glue_section (info, abfd,
9150 globals->bfd_of_glue_owner,
9151 ARM_BX_GLUE_SECTION_NAME))
9152 return FALSE;
9153 }
9154
9155 return TRUE;
9156 }
9157
9158 /* Set the right machine number. */
9159
9160 static bfd_boolean
9161 elf32_arm_object_p (bfd *abfd)
9162 {
9163 unsigned int mach;
9164
9165 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9166
9167 if (mach != bfd_mach_arm_unknown)
9168 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9169
9170 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9171 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9172
9173 else
9174 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9175
9176 return TRUE;
9177 }
9178
9179 /* Function to keep ARM specific flags in the ELF header. */
9180
9181 static bfd_boolean
9182 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9183 {
9184 if (elf_flags_init (abfd)
9185 && elf_elfheader (abfd)->e_flags != flags)
9186 {
9187 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9188 {
9189 if (flags & EF_ARM_INTERWORK)
9190 (*_bfd_error_handler)
9191 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9192 abfd);
9193 else
9194 _bfd_error_handler
9195 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9196 abfd);
9197 }
9198 }
9199 else
9200 {
9201 elf_elfheader (abfd)->e_flags = flags;
9202 elf_flags_init (abfd) = TRUE;
9203 }
9204
9205 return TRUE;
9206 }
9207
9208 /* Copy backend specific data from one object module to another. */
9209
9210 static bfd_boolean
9211 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9212 {
9213 flagword in_flags;
9214 flagword out_flags;
9215
9216 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9217 return TRUE;
9218
9219 in_flags = elf_elfheader (ibfd)->e_flags;
9220 out_flags = elf_elfheader (obfd)->e_flags;
9221
9222 if (elf_flags_init (obfd)
9223 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9224 && in_flags != out_flags)
9225 {
9226 /* Cannot mix APCS26 and APCS32 code. */
9227 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9228 return FALSE;
9229
9230 /* Cannot mix float APCS and non-float APCS code. */
9231 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9232 return FALSE;
9233
9234 /* If the src and dest have different interworking flags
9235 then turn off the interworking bit. */
9236 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9237 {
9238 if (out_flags & EF_ARM_INTERWORK)
9239 _bfd_error_handler
9240 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9241 obfd, ibfd);
9242
9243 in_flags &= ~EF_ARM_INTERWORK;
9244 }
9245
9246 /* Likewise for PIC, though don't warn for this case. */
9247 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9248 in_flags &= ~EF_ARM_PIC;
9249 }
9250
9251 elf_elfheader (obfd)->e_flags = in_flags;
9252 elf_flags_init (obfd) = TRUE;
9253
9254 /* Also copy the EI_OSABI field. */
9255 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9256 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9257
9258 /* Copy object attributes. */
9259 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9260
9261 return TRUE;
9262 }
9263
9264 /* Values for Tag_ABI_PCS_R9_use. */
9265 enum
9266 {
9267 AEABI_R9_V6,
9268 AEABI_R9_SB,
9269 AEABI_R9_TLS,
9270 AEABI_R9_unused
9271 };
9272
9273 /* Values for Tag_ABI_PCS_RW_data. */
9274 enum
9275 {
9276 AEABI_PCS_RW_data_absolute,
9277 AEABI_PCS_RW_data_PCrel,
9278 AEABI_PCS_RW_data_SBrel,
9279 AEABI_PCS_RW_data_unused
9280 };
9281
9282 /* Values for Tag_ABI_enum_size. */
9283 enum
9284 {
9285 AEABI_enum_unused,
9286 AEABI_enum_short,
9287 AEABI_enum_wide,
9288 AEABI_enum_forced_wide
9289 };
9290
9291 /* Determine whether an object attribute tag takes an integer, a
9292 string or both. */
9293
9294 static int
9295 elf32_arm_obj_attrs_arg_type (int tag)
9296 {
9297 if (tag == Tag_compatibility)
9298 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9299 else if (tag == Tag_nodefaults)
9300 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9301 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9302 return ATTR_TYPE_FLAG_STR_VAL;
9303 else if (tag < 32)
9304 return ATTR_TYPE_FLAG_INT_VAL;
9305 else
9306 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9307 }
9308
9309 /* The ABI defines that Tag_conformance should be emitted first, and that
9310 Tag_nodefaults should be second (if either is defined). This sets those
9311 two positions, and bumps up the position of all the remaining tags to
9312 compensate. */
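/* For example (assuming the usual AEABI numbering, where Tag_nodefaults
   is 64 and Tag_conformance is 67): position 4 emits Tag_conformance,
   position 5 emits Tag_nodefaults, and position 6 maps back to tag 4,
   so tags 4 and 5 simply appear two slots later than they otherwise
   would. */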
9313 static int
9314 elf32_arm_obj_attrs_order (int num)
9315 {
9316 if (num == 4)
9317 return Tag_conformance;
9318 if (num == 5)
9319 return Tag_nodefaults;
9320 if ((num - 2) < Tag_nodefaults)
9321 return num - 2;
9322 if ((num - 1) < Tag_conformance)
9323 return num - 1;
9324 return num;
9325 }
9326
9327 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9328 Returns -1 if no architecture could be read. */
9329
9330 static int
9331 get_secondary_compatible_arch (bfd *abfd)
9332 {
9333 obj_attribute *attr =
9334 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9335
9336 /* Note: the tag and its argument below are uleb128 values, though
9337 currently-defined values fit in one byte for each. */
9338 if (attr->s
9339 && attr->s[0] == Tag_CPU_arch
9340 && (attr->s[1] & 128) != 128
9341 && attr->s[2] == 0)
9342 return attr->s[1];
9343
9344 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9345 return -1;
9346 }
9347
9348 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9349 The tag is removed if ARCH is -1. */
9350
9351 static void
9352 set_secondary_compatible_arch (bfd *abfd, int arch)
9353 {
9354 obj_attribute *attr =
9355 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9356
9357 if (arch == -1)
9358 {
9359 attr->s = NULL;
9360 return;
9361 }
9362
9363 /* Note: the tag and its argument below are uleb128 values, though
9364 currently-defined values fit in one byte for each. */
9365 if (!attr->s)
9366 attr->s = bfd_alloc (abfd, 3);
9367 attr->s[0] = Tag_CPU_arch;
9368 attr->s[1] = arch;
9369 attr->s[2] = '\0';
9370 }
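/* As stored by the function above, the attribute data is the three-byte
   sequence { Tag_CPU_arch, arch, 0 }; get_secondary_compatible_arch
   accepts exactly this shape (single-byte uleb128 values followed by a
   NUL) before trusting it. */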
9371
9372 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9373 into account. */
9374
9375 static int
9376 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9377 int newtag, int secondary_compat)
9378 {
9379 #define T(X) TAG_CPU_ARCH_##X
9380 int tagl, tagh, result;
9381 const int v6t2[] =
9382 {
9383 T(V6T2), /* PRE_V4. */
9384 T(V6T2), /* V4. */
9385 T(V6T2), /* V4T. */
9386 T(V6T2), /* V5T. */
9387 T(V6T2), /* V5TE. */
9388 T(V6T2), /* V5TEJ. */
9389 T(V6T2), /* V6. */
9390 T(V7), /* V6KZ. */
9391 T(V6T2) /* V6T2. */
9392 };
9393 const int v6k[] =
9394 {
9395 T(V6K), /* PRE_V4. */
9396 T(V6K), /* V4. */
9397 T(V6K), /* V4T. */
9398 T(V6K), /* V5T. */
9399 T(V6K), /* V5TE. */
9400 T(V6K), /* V5TEJ. */
9401 T(V6K), /* V6. */
9402 T(V6KZ), /* V6KZ. */
9403 T(V7), /* V6T2. */
9404 T(V6K) /* V6K. */
9405 };
9406 const int v7[] =
9407 {
9408 T(V7), /* PRE_V4. */
9409 T(V7), /* V4. */
9410 T(V7), /* V4T. */
9411 T(V7), /* V5T. */
9412 T(V7), /* V5TE. */
9413 T(V7), /* V5TEJ. */
9414 T(V7), /* V6. */
9415 T(V7), /* V6KZ. */
9416 T(V7), /* V6T2. */
9417 T(V7), /* V6K. */
9418 T(V7) /* V7. */
9419 };
9420 const int v6_m[] =
9421 {
9422 -1, /* PRE_V4. */
9423 -1, /* V4. */
9424 T(V6K), /* V4T. */
9425 T(V6K), /* V5T. */
9426 T(V6K), /* V5TE. */
9427 T(V6K), /* V5TEJ. */
9428 T(V6K), /* V6. */
9429 T(V6KZ), /* V6KZ. */
9430 T(V7), /* V6T2. */
9431 T(V6K), /* V6K. */
9432 T(V7), /* V7. */
9433 T(V6_M) /* V6_M. */
9434 };
9435 const int v6s_m[] =
9436 {
9437 -1, /* PRE_V4. */
9438 -1, /* V4. */
9439 T(V6K), /* V4T. */
9440 T(V6K), /* V5T. */
9441 T(V6K), /* V5TE. */
9442 T(V6K), /* V5TEJ. */
9443 T(V6K), /* V6. */
9444 T(V6KZ), /* V6KZ. */
9445 T(V7), /* V6T2. */
9446 T(V6K), /* V6K. */
9447 T(V7), /* V7. */
9448 T(V6S_M), /* V6_M. */
9449 T(V6S_M) /* V6S_M. */
9450 };
9451 const int v4t_plus_v6_m[] =
9452 {
9453 -1, /* PRE_V4. */
9454 -1, /* V4. */
9455 T(V4T), /* V4T. */
9456 T(V5T), /* V5T. */
9457 T(V5TE), /* V5TE. */
9458 T(V5TEJ), /* V5TEJ. */
9459 T(V6), /* V6. */
9460 T(V6KZ), /* V6KZ. */
9461 T(V6T2), /* V6T2. */
9462 T(V6K), /* V6K. */
9463 T(V7), /* V7. */
9464 T(V6_M), /* V6_M. */
9465 T(V6S_M), /* V6S_M. */
9466 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9467 };
9468 const int *comb[] =
9469 {
9470 v6t2,
9471 v6k,
9472 v7,
9473 v6_m,
9474 v6s_m,
9475 /* Pseudo-architecture. */
9476 v4t_plus_v6_m
9477 };
9478
9479 /* Check we've not got a higher architecture than we know about. */
9480
9481 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9482 {
9483 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9484 return -1;
9485 }
9486
9487 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9488
9489 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9490 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9491 oldtag = T(V4T_PLUS_V6_M);
9492
9493 /* And override the new tag if we have a Tag_also_compatible_with on the
9494 input. */
9495
9496 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9497 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9498 newtag = T(V4T_PLUS_V6_M);
9499
9500 tagl = (oldtag < newtag) ? oldtag : newtag;
9501 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9502
9503 /* Architectures before V6KZ add features monotonically. */
9504 if (tagh <= TAG_CPU_ARCH_V6KZ)
9505 return result;
9506
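  /* Past V6KZ the architectures stop being strict supersets of one
     another, so consult the tables above: comb[0] is used when the
     higher tag is V6T2, comb[1] for V6K, and so on.  For example,
     merging V6K with V6T2 looks up v6k[V6T2] and yields V7, the
     smallest architecture that includes both. */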
9507 result = comb[tagh - T(V6T2)][tagl];
9508
9509 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9510 as the canonical version. */
9511 if (result == T(V4T_PLUS_V6_M))
9512 {
9513 result = T(V4T);
9514 *secondary_compat_out = T(V6_M);
9515 }
9516 else
9517 *secondary_compat_out = -1;
9518
9519 if (result == -1)
9520 {
9521 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9522 ibfd, oldtag, newtag);
9523 return -1;
9524 }
9525
9526 return result;
9527 #undef T
9528 }
9529
9530 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9531 are conflicting attributes. */
9532
9533 static bfd_boolean
9534 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9535 {
9536 obj_attribute *in_attr;
9537 obj_attribute *out_attr;
9538 obj_attribute_list *in_list;
9539 obj_attribute_list *out_list;
9540 obj_attribute_list **out_listp;
9541 /* Some tags have 0 = don't care, 1 = strong requirement,
9542 2 = weak requirement. */
9543 static const int order_021[3] = {0, 2, 1};
9544 /* For use with Tag_VFP_arch. */
9545 static const int order_01243[5] = {0, 1, 2, 4, 3};
9546 int i;
9547 bfd_boolean result = TRUE;
9548
9549 /* Skip the linker stubs file. This preserves previous behavior
9550 of accepting unknown attributes in the first input file - but
9551 is that a bug? */
9552 if (ibfd->flags & BFD_LINKER_CREATED)
9553 return TRUE;
9554
9555 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9556 {
9557 /* This is the first object. Copy the attributes. */
9558 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9559
9560 /* Use the Tag_null value to indicate the attributes have been
9561 initialized. */
9562 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9563
9564 return TRUE;
9565 }
9566
9567 in_attr = elf_known_obj_attributes_proc (ibfd);
9568 out_attr = elf_known_obj_attributes_proc (obfd);
9569 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9570 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9571 {
9572 /* Ignore mismatches if the object doesn't use floating point. */
9573 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9574 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9575 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9576 {
9577 _bfd_error_handler
9578 (_("error: %B uses VFP register arguments, %B does not"),
9579 ibfd, obfd);
9580 result = FALSE;
9581 }
9582 }
9583
9584 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9585 {
9586 /* Merge this attribute with existing attributes. */
9587 switch (i)
9588 {
9589 case Tag_CPU_raw_name:
9590 case Tag_CPU_name:
9591 /* These are merged after Tag_CPU_arch. */
9592 break;
9593
9594 case Tag_ABI_optimization_goals:
9595 case Tag_ABI_FP_optimization_goals:
9596 /* Use the first value seen. */
9597 break;
9598
9599 case Tag_CPU_arch:
9600 {
9601 int secondary_compat = -1, secondary_compat_out = -1;
9602 unsigned int saved_out_attr = out_attr[i].i;
9603 static const char *name_table[] = {
9604 /* These aren't real CPU names, but we can't guess
9605 that from the architecture version alone. */
9606 "Pre v4",
9607 "ARM v4",
9608 "ARM v4T",
9609 "ARM v5T",
9610 "ARM v5TE",
9611 "ARM v5TEJ",
9612 "ARM v6",
9613 "ARM v6KZ",
9614 "ARM v6T2",
9615 "ARM v6K",
9616 "ARM v7",
9617 "ARM v6-M",
9618 "ARM v6S-M"
9619 };
9620
9621 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9622 secondary_compat = get_secondary_compatible_arch (ibfd);
9623 secondary_compat_out = get_secondary_compatible_arch (obfd);
9624 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9625 &secondary_compat_out,
9626 in_attr[i].i,
9627 secondary_compat);
9628 set_secondary_compatible_arch (obfd, secondary_compat_out);
9629
9630 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9631 if (out_attr[i].i == saved_out_attr)
9632 ; /* Leave the names alone. */
9633 else if (out_attr[i].i == in_attr[i].i)
9634 {
9635 /* The output architecture has been changed to match the
9636 input architecture. Use the input names. */
9637 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9638 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9639 : NULL;
9640 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9641 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9642 : NULL;
9643 }
9644 else
9645 {
9646 out_attr[Tag_CPU_name].s = NULL;
9647 out_attr[Tag_CPU_raw_name].s = NULL;
9648 }
9649
9650 /* If we still don't have a value for Tag_CPU_name,
9651 make one up now. Tag_CPU_raw_name remains blank. */
9652 if (out_attr[Tag_CPU_name].s == NULL
9653 && out_attr[i].i < ARRAY_SIZE (name_table))
9654 out_attr[Tag_CPU_name].s =
9655 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9656 }
9657 break;
9658
9659 case Tag_ARM_ISA_use:
9660 case Tag_THUMB_ISA_use:
9661 case Tag_WMMX_arch:
9662 case Tag_Advanced_SIMD_arch:
9663 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9664 case Tag_ABI_FP_rounding:
9665 case Tag_ABI_FP_exceptions:
9666 case Tag_ABI_FP_user_exceptions:
9667 case Tag_ABI_FP_number_model:
9668 case Tag_VFP_HP_extension:
9669 case Tag_CPU_unaligned_access:
9670 case Tag_T2EE_use:
9671 case Tag_Virtualization_use:
9672 case Tag_MPextension_use:
9673 /* Use the largest value specified. */
9674 if (in_attr[i].i > out_attr[i].i)
9675 out_attr[i].i = in_attr[i].i;
9676 break;
9677
9678 case Tag_ABI_align8_preserved:
9679 case Tag_ABI_PCS_RO_data:
9680 /* Use the smallest value specified. */
9681 if (in_attr[i].i < out_attr[i].i)
9682 out_attr[i].i = in_attr[i].i;
9683 break;
9684
9685 case Tag_ABI_align8_needed:
9686 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9687 && (in_attr[Tag_ABI_align8_preserved].i == 0
9688 || out_attr[Tag_ABI_align8_preserved].i == 0))
9689 {
9690 /* This error message should be enabled once all non-conformant
9691 binaries in the toolchain have had the attributes set
9692 properly.
9693 _bfd_error_handler
9694 (_("error: %B: 8-byte data alignment conflicts with %B"),
9695 obfd, ibfd);
9696 result = FALSE; */
9697 }
9698 /* Fall through. */
9699 case Tag_ABI_FP_denormal:
9700 case Tag_ABI_PCS_GOT_use:
9701 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9702 value if greater than 2 (for future-proofing). */
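	  /* In the 0 < 2 < 1 ranking this means, e.g., that merging a
	     value of 2 with a value of 1 keeps 1, while 0 always defers
	     to the other object's value. */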
9703 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9704 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9705 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9706 out_attr[i].i = in_attr[i].i;
9707 break;
9708
9709
9710 case Tag_CPU_arch_profile:
9711 if (out_attr[i].i != in_attr[i].i)
9712 {
9713 /* 0 will merge with anything.
9714 'A' and 'S' merge to 'A'.
9715 'R' and 'S' merge to 'R'.
9716 'M' and 'A|R|S' is an error. */
9717 if (out_attr[i].i == 0
9718 || (out_attr[i].i == 'S'
9719 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9720 out_attr[i].i = in_attr[i].i;
9721 else if (in_attr[i].i == 0
9722 || (in_attr[i].i == 'S'
9723 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9724 ; /* Do nothing. */
9725 else
9726 {
9727 _bfd_error_handler
9728 (_("error: %B: Conflicting architecture profiles %c/%c"),
9729 ibfd,
9730 in_attr[i].i ? in_attr[i].i : '0',
9731 out_attr[i].i ? out_attr[i].i : '0');
9732 result = FALSE;
9733 }
9734 }
9735 break;
9736 case Tag_VFP_arch:
9737 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9738 largest value if greater than 4 (for future-proofing). */
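	  /* So, e.g., merging 3 with 4 keeps 3, which (with the usual
	     Tag_VFP_arch numbering) says that full VFPv3 subsumes
	     VFPv3-D16. */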
9739 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9740 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9741 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9742 out_attr[i].i = in_attr[i].i;
9743 break;
9744 case Tag_PCS_config:
9745 if (out_attr[i].i == 0)
9746 out_attr[i].i = in_attr[i].i;
9747 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9748 {
9749 /* It's sometimes ok to mix different configs, so this is only
9750 a warning. */
9751 _bfd_error_handler
9752 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9753 }
9754 break;
9755 case Tag_ABI_PCS_R9_use:
9756 if (in_attr[i].i != out_attr[i].i
9757 && out_attr[i].i != AEABI_R9_unused
9758 && in_attr[i].i != AEABI_R9_unused)
9759 {
9760 _bfd_error_handler
9761 (_("error: %B: Conflicting use of R9"), ibfd);
9762 result = FALSE;
9763 }
9764 if (out_attr[i].i == AEABI_R9_unused)
9765 out_attr[i].i = in_attr[i].i;
9766 break;
9767 case Tag_ABI_PCS_RW_data:
9768 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9769 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9770 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9771 {
9772 _bfd_error_handler
9773 (_("error: %B: SB relative addressing conflicts with use of R9"),
9774 ibfd);
9775 result = FALSE;
9776 }
9777 /* Use the smallest value specified. */
9778 if (in_attr[i].i < out_attr[i].i)
9779 out_attr[i].i = in_attr[i].i;
9780 break;
9781 case Tag_ABI_PCS_wchar_t:
9782 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9783 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9784 {
9785 _bfd_error_handler
9786 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9787 ibfd, in_attr[i].i, out_attr[i].i);
9788 }
9789 else if (in_attr[i].i && !out_attr[i].i)
9790 out_attr[i].i = in_attr[i].i;
9791 break;
9792 case Tag_ABI_enum_size:
9793 if (in_attr[i].i != AEABI_enum_unused)
9794 {
9795 if (out_attr[i].i == AEABI_enum_unused
9796 || out_attr[i].i == AEABI_enum_forced_wide)
9797 {
9798 /* The existing object is compatible with anything.
9799 Use whatever requirements the new object has. */
9800 out_attr[i].i = in_attr[i].i;
9801 }
9802 else if (in_attr[i].i != AEABI_enum_forced_wide
9803 && out_attr[i].i != in_attr[i].i
9804 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9805 {
9806 static const char *aeabi_enum_names[] =
9807 { "", "variable-size", "32-bit", "" };
9808 const char *in_name =
9809 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9810 ? aeabi_enum_names[in_attr[i].i]
9811 : "<unknown>";
9812 const char *out_name =
9813 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9814 ? aeabi_enum_names[out_attr[i].i]
9815 : "<unknown>";
9816 _bfd_error_handler
9817 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9818 ibfd, in_name, out_name);
9819 }
9820 }
9821 break;
9822 case Tag_ABI_VFP_args:
9823 	  /* Already done. */
9824 break;
9825 case Tag_ABI_WMMX_args:
9826 if (in_attr[i].i != out_attr[i].i)
9827 {
9828 _bfd_error_handler
9829 (_("error: %B uses iWMMXt register arguments, %B does not"),
9830 ibfd, obfd);
9831 result = FALSE;
9832 }
9833 break;
9834 case Tag_compatibility:
9835 /* Merged in target-independent code. */
9836 break;
9837 case Tag_ABI_HardFP_use:
9838 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9839 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9840 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9841 out_attr[i].i = 3;
9842 else if (in_attr[i].i > out_attr[i].i)
9843 out_attr[i].i = in_attr[i].i;
9844 break;
9845 case Tag_ABI_FP_16bit_format:
9846 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9847 {
9848 if (in_attr[i].i != out_attr[i].i)
9849 {
9850 _bfd_error_handler
9851 (_("error: fp16 format mismatch between %B and %B"),
9852 ibfd, obfd);
9853 result = FALSE;
9854 }
9855 }
9856 if (in_attr[i].i != 0)
9857 out_attr[i].i = in_attr[i].i;
9858 break;
9859
9860 case Tag_nodefaults:
9861 /* This tag is set if it exists, but the value is unused (and is
9862 typically zero). We don't actually need to do anything here -
9863 the merge happens automatically when the type flags are merged
9864 below. */
9865 break;
9866 case Tag_also_compatible_with:
9867 /* Already done in Tag_CPU_arch. */
9868 break;
9869 case Tag_conformance:
9870 /* Keep the attribute if it matches. Throw it away otherwise.
9871 No attribute means no claim to conform. */
9872 if (!in_attr[i].s || !out_attr[i].s
9873 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9874 out_attr[i].s = NULL;
9875 break;
9876
9877 default:
9878 {
9879 bfd *err_bfd = NULL;
9880
9881 /* The "known_obj_attributes" table does contain some undefined
9882 	       attributes. Ensure that they are unused. */
9883 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9884 err_bfd = obfd;
9885 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9886 err_bfd = ibfd;
9887
9888 if (err_bfd != NULL)
9889 {
9890 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
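	      /* E.g. an unknown tag 65 only produces the warning below,
		 whereas an unknown tag 33 is treated as mandatory and
		 causes a hard error. */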
9891 if ((i & 127) < 64)
9892 {
9893 _bfd_error_handler
9894 (_("%B: Unknown mandatory EABI object attribute %d"),
9895 err_bfd, i);
9896 bfd_set_error (bfd_error_bad_value);
9897 result = FALSE;
9898 }
9899 else
9900 {
9901 _bfd_error_handler
9902 (_("Warning: %B: Unknown EABI object attribute %d"),
9903 err_bfd, i);
9904 }
9905 }
9906
9907 /* Only pass on attributes that match in both inputs. */
9908 if (in_attr[i].i != out_attr[i].i
9909 || in_attr[i].s != out_attr[i].s
9910 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9911 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9912 {
9913 out_attr[i].i = 0;
9914 out_attr[i].s = NULL;
9915 }
9916 }
9917 }
9918
9919 /* If out_attr was copied from in_attr then it won't have a type yet. */
9920 if (in_attr[i].type && !out_attr[i].type)
9921 out_attr[i].type = in_attr[i].type;
9922 }
9923
9924 /* Merge Tag_compatibility attributes and any common GNU ones. */
9925 _bfd_elf_merge_object_attributes (ibfd, obfd);
9926
9927 /* Check for any attributes not known on ARM. */
9928 in_list = elf_other_obj_attributes_proc (ibfd);
9929 out_listp = &elf_other_obj_attributes_proc (obfd);
9930 out_list = *out_listp;
9931
9932 for (; in_list || out_list; )
9933 {
9934 bfd *err_bfd = NULL;
9935 int err_tag = 0;
9936
9937 /* The tags for each list are in numerical order. */
9938 /* If the tags are equal, then merge. */
9939 if (out_list && (!in_list || in_list->tag > out_list->tag))
9940 {
9941 /* This attribute only exists in obfd. We can't merge, and we don't
9942 know what the tag means, so delete it. */
9943 err_bfd = obfd;
9944 err_tag = out_list->tag;
9945 *out_listp = out_list->next;
9946 out_list = *out_listp;
9947 }
9948 else if (in_list && (!out_list || in_list->tag < out_list->tag))
9949 {
9950 /* This attribute only exists in ibfd. We can't merge, and we don't
9951 know what the tag means, so ignore it. */
9952 err_bfd = ibfd;
9953 err_tag = in_list->tag;
9954 in_list = in_list->next;
9955 }
9956 else /* The tags are equal. */
9957 {
9958 	  /* At present, all attributes in the list are unknown, and
9959 therefore can't be merged meaningfully. */
9960 err_bfd = obfd;
9961 err_tag = out_list->tag;
9962
9963 /* Only pass on attributes that match in both inputs. */
9964 if (in_list->attr.i != out_list->attr.i
9965 || in_list->attr.s != out_list->attr.s
9966 || (in_list->attr.s && out_list->attr.s
9967 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
9968 {
9969 /* No match. Delete the attribute. */
9970 *out_listp = out_list->next;
9971 out_list = *out_listp;
9972 }
9973 else
9974 {
9975 /* Matched. Keep the attribute and move to the next. */
9976 out_list = out_list->next;
9977 in_list = in_list->next;
9978 }
9979 }
9980
9981 if (err_bfd)
9982 {
9983 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9984 if ((err_tag & 127) < 64)
9985 {
9986 _bfd_error_handler
9987 (_("%B: Unknown mandatory EABI object attribute %d"),
9988 err_bfd, err_tag);
9989 bfd_set_error (bfd_error_bad_value);
9990 result = FALSE;
9991 }
9992 else
9993 {
9994 _bfd_error_handler
9995 (_("Warning: %B: Unknown EABI object attribute %d"),
9996 err_bfd, err_tag);
9997 }
9998 }
9999 }
10000 return result;
10001 }
10002
10003
10004 /* Return TRUE if the two EABI versions are incompatible. */
10005
10006 static bfd_boolean
10007 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10008 {
10009 /* v4 and v5 are the same spec before and after it was released,
10010 so allow mixing them. */
10011 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10012 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10013 return TRUE;
10014
10015 return (iver == over);
10016 }
10017
10018 /* Merge backend specific data from an object file to the output
10019 object file when linking. */
10020
10021 static bfd_boolean
10022 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10023 {
10024 flagword out_flags;
10025 flagword in_flags;
10026 bfd_boolean flags_compatible = TRUE;
10027 asection *sec;
10028
10029 /* Check if we have the same endianness. */
10030 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10031 return FALSE;
10032
10033 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10034 return TRUE;
10035
10036 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10037 return FALSE;
10038
10039 /* The input BFD must have had its flags initialised. */
10040 /* The following seems bogus to me -- The flags are initialized in
10041 the assembler but I don't think an elf_flags_init field is
10042 written into the object. */
10043 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10044
10045 in_flags = elf_elfheader (ibfd)->e_flags;
10046 out_flags = elf_elfheader (obfd)->e_flags;
10047
10048 /* In theory there is no reason why we couldn't handle this. However
10049 in practice it isn't even close to working and there is no real
10050 reason to want it. */
10051 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10052 && !(ibfd->flags & DYNAMIC)
10053 && (in_flags & EF_ARM_BE8))
10054 {
10055 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10056 ibfd);
10057 return FALSE;
10058 }
10059
10060 if (!elf_flags_init (obfd))
10061 {
10062 /* If the input is the default architecture and had the default
10063 flags then do not bother setting the flags for the output
10064 	 architecture; instead, allow future merges to do this. If no
10065 	 future merges ever set these flags then they will retain their
10066 	 uninitialised values, which, surprise surprise, correspond
10067 to the default values. */
10068 if (bfd_get_arch_info (ibfd)->the_default
10069 && elf_elfheader (ibfd)->e_flags == 0)
10070 return TRUE;
10071
10072 elf_flags_init (obfd) = TRUE;
10073 elf_elfheader (obfd)->e_flags = in_flags;
10074
10075 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10076 && bfd_get_arch_info (obfd)->the_default)
10077 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10078
10079 return TRUE;
10080 }
10081
10082 /* Determine what should happen if the input ARM architecture
10083 does not match the output ARM architecture. */
10084 if (! bfd_arm_merge_machines (ibfd, obfd))
10085 return FALSE;
10086
10087 /* Identical flags must be compatible. */
10088 if (in_flags == out_flags)
10089 return TRUE;
10090
10091 /* Check to see if the input BFD actually contains any sections. If
10092 not, its flags may not have been initialised either, but it
10093 cannot actually cause any incompatibility. Do not short-circuit
10094 dynamic objects; their section list may be emptied by
10095 elf_link_add_object_symbols.
10096
10097 Also check to see if there are no code sections in the input.
10098 In this case there is no need to check for code specific flags.
10099 XXX - do we need to worry about floating-point format compatibility
10100 in data sections ? */
10101 if (!(ibfd->flags & DYNAMIC))
10102 {
10103 bfd_boolean null_input_bfd = TRUE;
10104 bfd_boolean only_data_sections = TRUE;
10105
10106 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10107 {
10108 /* Ignore synthetic glue sections. */
10109 if (strcmp (sec->name, ".glue_7")
10110 && strcmp (sec->name, ".glue_7t"))
10111 {
10112 if ((bfd_get_section_flags (ibfd, sec)
10113 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10114 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10115 only_data_sections = FALSE;
10116
10117 null_input_bfd = FALSE;
10118 break;
10119 }
10120 }
10121
10122 if (null_input_bfd || only_data_sections)
10123 return TRUE;
10124 }
10125
10126 /* Complain about various flag mismatches. */
10127 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10128 EF_ARM_EABI_VERSION (out_flags)))
10129 {
10130 _bfd_error_handler
10131 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10132 ibfd, obfd,
10133 (in_flags & EF_ARM_EABIMASK) >> 24,
10134 (out_flags & EF_ARM_EABIMASK) >> 24);
10135 return FALSE;
10136 }
10137
10138 /* Not sure what needs to be checked for EABI versions >= 1. */
10139 /* VxWorks libraries do not use these flags. */
10140 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10141 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10142 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10143 {
10144 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10145 {
10146 _bfd_error_handler
10147 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10148 ibfd, obfd,
10149 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10150 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10151 flags_compatible = FALSE;
10152 }
10153
10154 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10155 {
10156 if (in_flags & EF_ARM_APCS_FLOAT)
10157 _bfd_error_handler
10158 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10159 ibfd, obfd);
10160 else
10161 _bfd_error_handler
10162 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10163 ibfd, obfd);
10164
10165 flags_compatible = FALSE;
10166 }
10167
10168 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10169 {
10170 if (in_flags & EF_ARM_VFP_FLOAT)
10171 _bfd_error_handler
10172 (_("error: %B uses VFP instructions, whereas %B does not"),
10173 ibfd, obfd);
10174 else
10175 _bfd_error_handler
10176 (_("error: %B uses FPA instructions, whereas %B does not"),
10177 ibfd, obfd);
10178
10179 flags_compatible = FALSE;
10180 }
10181
10182 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10183 {
10184 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10185 _bfd_error_handler
10186 (_("error: %B uses Maverick instructions, whereas %B does not"),
10187 ibfd, obfd);
10188 else
10189 _bfd_error_handler
10190 (_("error: %B does not use Maverick instructions, whereas %B does"),
10191 ibfd, obfd);
10192
10193 flags_compatible = FALSE;
10194 }
10195
10196 #ifdef EF_ARM_SOFT_FLOAT
10197 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10198 {
10199 /* We can allow interworking between code that is VFP format
10200 layout, and uses either soft float or integer regs for
10201 passing floating point arguments and results. We already
10202 know that the APCS_FLOAT flags match; similarly for VFP
10203 flags. */
10204 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10205 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10206 {
10207 if (in_flags & EF_ARM_SOFT_FLOAT)
10208 _bfd_error_handler
10209 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10210 ibfd, obfd);
10211 else
10212 _bfd_error_handler
10213 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10214 ibfd, obfd);
10215
10216 flags_compatible = FALSE;
10217 }
10218 }
10219 #endif
10220
10221 /* Interworking mismatch is only a warning. */
10222 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10223 {
10224 if (in_flags & EF_ARM_INTERWORK)
10225 {
10226 _bfd_error_handler
10227 (_("Warning: %B supports interworking, whereas %B does not"),
10228 ibfd, obfd);
10229 }
10230 else
10231 {
10232 _bfd_error_handler
10233 (_("Warning: %B does not support interworking, whereas %B does"),
10234 ibfd, obfd);
10235 }
10236 }
10237 }
10238
10239 return flags_compatible;
10240 }
10241
10242 /* Display the flags field. */
10243
10244 static bfd_boolean
10245 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10246 {
10247 FILE * file = (FILE *) ptr;
10248 unsigned long flags;
10249
10250 BFD_ASSERT (abfd != NULL && ptr != NULL);
10251
10252 /* Print normal ELF private data. */
10253 _bfd_elf_print_private_bfd_data (abfd, ptr);
10254
10255 flags = elf_elfheader (abfd)->e_flags;
10256 /* Ignore init flag - it may not be set, despite the flags field
10257 containing valid data. */
10258
10259 /* xgettext:c-format */
10260 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10261
10262 switch (EF_ARM_EABI_VERSION (flags))
10263 {
10264 case EF_ARM_EABI_UNKNOWN:
10265 /* The following flag bits are GNU extensions and not part of the
10266 official ARM ELF extended ABI. Hence they are only decoded if
10267 the EABI version is not set. */
10268 if (flags & EF_ARM_INTERWORK)
10269 fprintf (file, _(" [interworking enabled]"));
10270
10271 if (flags & EF_ARM_APCS_26)
10272 fprintf (file, " [APCS-26]");
10273 else
10274 fprintf (file, " [APCS-32]");
10275
10276 if (flags & EF_ARM_VFP_FLOAT)
10277 fprintf (file, _(" [VFP float format]"));
10278 else if (flags & EF_ARM_MAVERICK_FLOAT)
10279 fprintf (file, _(" [Maverick float format]"));
10280 else
10281 fprintf (file, _(" [FPA float format]"));
10282
10283 if (flags & EF_ARM_APCS_FLOAT)
10284 fprintf (file, _(" [floats passed in float registers]"));
10285
10286 if (flags & EF_ARM_PIC)
10287 fprintf (file, _(" [position independent]"));
10288
10289 if (flags & EF_ARM_NEW_ABI)
10290 fprintf (file, _(" [new ABI]"));
10291
10292 if (flags & EF_ARM_OLD_ABI)
10293 fprintf (file, _(" [old ABI]"));
10294
10295 if (flags & EF_ARM_SOFT_FLOAT)
10296 fprintf (file, _(" [software FP]"));
10297
10298 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10299 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10300 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10301 | EF_ARM_MAVERICK_FLOAT);
10302 break;
10303
10304 case EF_ARM_EABI_VER1:
10305 fprintf (file, _(" [Version1 EABI]"));
10306
10307 if (flags & EF_ARM_SYMSARESORTED)
10308 fprintf (file, _(" [sorted symbol table]"));
10309 else
10310 fprintf (file, _(" [unsorted symbol table]"));
10311
10312 flags &= ~ EF_ARM_SYMSARESORTED;
10313 break;
10314
10315 case EF_ARM_EABI_VER2:
10316 fprintf (file, _(" [Version2 EABI]"));
10317
10318 if (flags & EF_ARM_SYMSARESORTED)
10319 fprintf (file, _(" [sorted symbol table]"));
10320 else
10321 fprintf (file, _(" [unsorted symbol table]"));
10322
10323 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10324 fprintf (file, _(" [dynamic symbols use segment index]"));
10325
10326 if (flags & EF_ARM_MAPSYMSFIRST)
10327 fprintf (file, _(" [mapping symbols precede others]"));
10328
10329 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10330 | EF_ARM_MAPSYMSFIRST);
10331 break;
10332
10333 case EF_ARM_EABI_VER3:
10334 fprintf (file, _(" [Version3 EABI]"));
10335 break;
10336
10337 case EF_ARM_EABI_VER4:
10338 fprintf (file, _(" [Version4 EABI]"));
10339 goto eabi;
10340
10341 case EF_ARM_EABI_VER5:
10342 fprintf (file, _(" [Version5 EABI]"));
10343 eabi:
10344 if (flags & EF_ARM_BE8)
10345 fprintf (file, _(" [BE8]"));
10346
10347 if (flags & EF_ARM_LE8)
10348 fprintf (file, _(" [LE8]"));
10349
10350 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10351 break;
10352
10353 default:
10354 fprintf (file, _(" <EABI version unrecognised>"));
10355 break;
10356 }
10357
10358 flags &= ~ EF_ARM_EABIMASK;
10359
10360 if (flags & EF_ARM_RELEXEC)
10361 fprintf (file, _(" [relocatable executable]"));
10362
10363 if (flags & EF_ARM_HASENTRY)
10364 fprintf (file, _(" [has entry point]"));
10365
10366 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10367
10368 if (flags)
10369 fprintf (file, _("<Unrecognised flag bits set>"));
10370
10371 fputc ('\n', file);
10372
10373 return TRUE;
10374 }
10375
10376 static int
10377 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10378 {
10379 switch (ELF_ST_TYPE (elf_sym->st_info))
10380 {
10381 case STT_ARM_TFUNC:
10382 return ELF_ST_TYPE (elf_sym->st_info);
10383
10384 case STT_ARM_16BIT:
10385 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10386 This allows us to distinguish between data used by Thumb instructions
10387 and non-data (which is probably code) inside Thumb regions of an
10388 executable. */
10389 if (type != STT_OBJECT && type != STT_TLS)
10390 return ELF_ST_TYPE (elf_sym->st_info);
10391 break;
10392
10393 default:
10394 break;
10395 }
10396
10397 return type;
10398 }
10399
10400 static asection *
10401 elf32_arm_gc_mark_hook (asection *sec,
10402 struct bfd_link_info *info,
10403 Elf_Internal_Rela *rel,
10404 struct elf_link_hash_entry *h,
10405 Elf_Internal_Sym *sym)
10406 {
10407 if (h != NULL)
10408 switch (ELF32_R_TYPE (rel->r_info))
10409 {
10410 case R_ARM_GNU_VTINHERIT:
10411 case R_ARM_GNU_VTENTRY:
10412 return NULL;
10413 }
10414
10415 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10416 }
10417
10418 /* Update the got entry reference counts for the section being removed. */
10419
10420 static bfd_boolean
10421 elf32_arm_gc_sweep_hook (bfd * abfd,
10422 struct bfd_link_info * info,
10423 asection * sec,
10424 const Elf_Internal_Rela * relocs)
10425 {
10426 Elf_Internal_Shdr *symtab_hdr;
10427 struct elf_link_hash_entry **sym_hashes;
10428 bfd_signed_vma *local_got_refcounts;
10429 const Elf_Internal_Rela *rel, *relend;
10430 struct elf32_arm_link_hash_table * globals;
10431
10432 if (info->relocatable)
10433 return TRUE;
10434
10435 globals = elf32_arm_hash_table (info);
10436
10437 elf_section_data (sec)->local_dynrel = NULL;
10438
10439 symtab_hdr = & elf_symtab_hdr (abfd);
10440 sym_hashes = elf_sym_hashes (abfd);
10441 local_got_refcounts = elf_local_got_refcounts (abfd);
10442
10443 check_use_blx (globals);
10444
10445 relend = relocs + sec->reloc_count;
10446 for (rel = relocs; rel < relend; rel++)
10447 {
10448 unsigned long r_symndx;
10449 struct elf_link_hash_entry *h = NULL;
10450 int r_type;
10451
10452 r_symndx = ELF32_R_SYM (rel->r_info);
10453 if (r_symndx >= symtab_hdr->sh_info)
10454 {
10455 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10456 while (h->root.type == bfd_link_hash_indirect
10457 || h->root.type == bfd_link_hash_warning)
10458 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10459 }
10460
10461 r_type = ELF32_R_TYPE (rel->r_info);
10462 r_type = arm_real_reloc_type (globals, r_type);
10463 switch (r_type)
10464 {
10465 case R_ARM_GOT32:
10466 case R_ARM_GOT_PREL:
10467 case R_ARM_TLS_GD32:
10468 case R_ARM_TLS_IE32:
10469 if (h != NULL)
10470 {
10471 if (h->got.refcount > 0)
10472 h->got.refcount -= 1;
10473 }
10474 else if (local_got_refcounts != NULL)
10475 {
10476 if (local_got_refcounts[r_symndx] > 0)
10477 local_got_refcounts[r_symndx] -= 1;
10478 }
10479 break;
10480
10481 case R_ARM_TLS_LDM32:
10482 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10483 break;
10484
10485 case R_ARM_ABS32:
10486 case R_ARM_ABS32_NOI:
10487 case R_ARM_REL32:
10488 case R_ARM_REL32_NOI:
10489 case R_ARM_PC24:
10490 case R_ARM_PLT32:
10491 case R_ARM_CALL:
10492 case R_ARM_JUMP24:
10493 case R_ARM_PREL31:
10494 case R_ARM_THM_CALL:
10495 case R_ARM_THM_JUMP24:
10496 case R_ARM_THM_JUMP19:
10497 case R_ARM_MOVW_ABS_NC:
10498 case R_ARM_MOVT_ABS:
10499 case R_ARM_MOVW_PREL_NC:
10500 case R_ARM_MOVT_PREL:
10501 case R_ARM_THM_MOVW_ABS_NC:
10502 case R_ARM_THM_MOVT_ABS:
10503 case R_ARM_THM_MOVW_PREL_NC:
10504 case R_ARM_THM_MOVT_PREL:
10505 /* Should the interworking branches be here also? */
10506
10507 if (h != NULL)
10508 {
10509 struct elf32_arm_link_hash_entry *eh;
10510 struct elf32_arm_relocs_copied **pp;
10511 struct elf32_arm_relocs_copied *p;
10512
10513 eh = (struct elf32_arm_link_hash_entry *) h;
10514
10515 if (h->plt.refcount > 0)
10516 {
10517 h->plt.refcount -= 1;
10518 if (r_type == R_ARM_THM_CALL)
10519 eh->plt_maybe_thumb_refcount--;
10520
10521 if (r_type == R_ARM_THM_JUMP24
10522 || r_type == R_ARM_THM_JUMP19)
10523 eh->plt_thumb_refcount--;
10524 }
10525
10526 if (r_type == R_ARM_ABS32
10527 || r_type == R_ARM_REL32
10528 || r_type == R_ARM_ABS32_NOI
10529 || r_type == R_ARM_REL32_NOI)
10530 {
10531 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10532 pp = &p->next)
10533 if (p->section == sec)
10534 {
10535 p->count -= 1;
10536 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10537 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10538 p->pc_count -= 1;
10539 if (p->count == 0)
10540 *pp = p->next;
10541 break;
10542 }
10543 }
10544 }
10545 break;
10546
10547 default:
10548 break;
10549 }
10550 }
10551
10552 return TRUE;
10553 }
10554
10555 /* Look through the relocs for a section during the first phase. */
10556
10557 static bfd_boolean
10558 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10559 asection *sec, const Elf_Internal_Rela *relocs)
10560 {
10561 Elf_Internal_Shdr *symtab_hdr;
10562 struct elf_link_hash_entry **sym_hashes;
10563 const Elf_Internal_Rela *rel;
10564 const Elf_Internal_Rela *rel_end;
10565 bfd *dynobj;
10566 asection *sreloc;
10567 bfd_vma *local_got_offsets;
10568 struct elf32_arm_link_hash_table *htab;
10569 bfd_boolean needs_plt;
10570 unsigned long nsyms;
10571
10572 if (info->relocatable)
10573 return TRUE;
10574
10575 BFD_ASSERT (is_arm_elf (abfd));
10576
10577 htab = elf32_arm_hash_table (info);
10578 sreloc = NULL;
10579
10580 /* Create dynamic sections for relocatable executables so that we can
10581 copy relocations. */
10582 if (htab->root.is_relocatable_executable
10583 && ! htab->root.dynamic_sections_created)
10584 {
10585 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10586 return FALSE;
10587 }
10588
10589 dynobj = elf_hash_table (info)->dynobj;
10590 local_got_offsets = elf_local_got_offsets (abfd);
10591
10592 symtab_hdr = & elf_symtab_hdr (abfd);
10593 sym_hashes = elf_sym_hashes (abfd);
10594 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10595
10596 rel_end = relocs + sec->reloc_count;
10597 for (rel = relocs; rel < rel_end; rel++)
10598 {
10599 struct elf_link_hash_entry *h;
10600 struct elf32_arm_link_hash_entry *eh;
10601 unsigned long r_symndx;
10602 int r_type;
10603
10604 r_symndx = ELF32_R_SYM (rel->r_info);
10605 r_type = ELF32_R_TYPE (rel->r_info);
10606 r_type = arm_real_reloc_type (htab, r_type);
10607
10608 if (r_symndx >= nsyms
10609 /* PR 9934: It is possible to have relocations that do not
10610 refer to symbols, thus it is also possible to have an
10611 object file containing relocations but no symbol table. */
10612 && (r_symndx > 0 || nsyms > 0))
10613 {
10614 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10615 r_symndx);
10616 return FALSE;
10617 }
10618
10619 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10620 h = NULL;
10621 else
10622 {
10623 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10624 while (h->root.type == bfd_link_hash_indirect
10625 || h->root.type == bfd_link_hash_warning)
10626 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10627 }
10628
10629 eh = (struct elf32_arm_link_hash_entry *) h;
10630
10631 switch (r_type)
10632 {
10633 case R_ARM_GOT32:
10634 case R_ARM_GOT_PREL:
10635 case R_ARM_TLS_GD32:
10636 case R_ARM_TLS_IE32:
10637 /* This symbol requires a global offset table entry. */
10638 {
10639 int tls_type, old_tls_type;
10640
10641 switch (r_type)
10642 {
10643 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10644 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10645 default: tls_type = GOT_NORMAL; break;
10646 }
10647
10648 if (h != NULL)
10649 {
10650 h->got.refcount++;
10651 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10652 }
10653 else
10654 {
10655 bfd_signed_vma *local_got_refcounts;
10656
10657 /* This is a global offset table entry for a local symbol. */
10658 local_got_refcounts = elf_local_got_refcounts (abfd);
10659 if (local_got_refcounts == NULL)
10660 {
10661 bfd_size_type size;
10662
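		  /* Allocate one refcount plus one tls_type byte per
		     local symbol in a single block; the tls_type array
		     is laid out immediately after the refcounts (see
		     elf32_arm_local_got_tls_type below). */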
10663 size = symtab_hdr->sh_info;
10664 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10665 local_got_refcounts = bfd_zalloc (abfd, size);
10666 if (local_got_refcounts == NULL)
10667 return FALSE;
10668 elf_local_got_refcounts (abfd) = local_got_refcounts;
10669 elf32_arm_local_got_tls_type (abfd)
10670 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10671 }
10672 local_got_refcounts[r_symndx] += 1;
10673 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10674 }
10675
10676 /* We will already have issued an error message if there is a
10677 TLS / non-TLS mismatch, based on the symbol type. We don't
10678 support any linker relaxations. So just combine any TLS
10679 types needed. */
10680 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10681 && tls_type != GOT_NORMAL)
10682 tls_type |= old_tls_type;
10683
10684 if (old_tls_type != tls_type)
10685 {
10686 if (h != NULL)
10687 elf32_arm_hash_entry (h)->tls_type = tls_type;
10688 else
10689 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10690 }
10691 }
10692 /* Fall through. */
10693
10694 case R_ARM_TLS_LDM32:
10695 if (r_type == R_ARM_TLS_LDM32)
10696 htab->tls_ldm_got.refcount++;
10697 /* Fall through. */
10698
10699 case R_ARM_GOTOFF32:
10700 case R_ARM_GOTPC:
10701 if (htab->sgot == NULL)
10702 {
10703 if (htab->root.dynobj == NULL)
10704 htab->root.dynobj = abfd;
10705 if (!create_got_section (htab->root.dynobj, info))
10706 return FALSE;
10707 }
10708 break;
10709
10710 case R_ARM_ABS12:
10711 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10712 ldr __GOTT_INDEX__ offsets. */
10713 if (!htab->vxworks_p)
10714 break;
10715 /* Fall through. */
10716
10717 case R_ARM_PC24:
10718 case R_ARM_PLT32:
10719 case R_ARM_CALL:
10720 case R_ARM_JUMP24:
10721 case R_ARM_PREL31:
10722 case R_ARM_THM_CALL:
10723 case R_ARM_THM_JUMP24:
10724 case R_ARM_THM_JUMP19:
10725 needs_plt = 1;
10726 goto normal_reloc;
10727
10728 case R_ARM_MOVW_ABS_NC:
10729 case R_ARM_MOVT_ABS:
10730 case R_ARM_THM_MOVW_ABS_NC:
10731 case R_ARM_THM_MOVT_ABS:
10732 if (info->shared)
10733 {
10734 (*_bfd_error_handler)
10735 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10736 abfd, elf32_arm_howto_table_1[r_type].name,
10737 (h) ? h->root.root.string : "a local symbol");
10738 bfd_set_error (bfd_error_bad_value);
10739 return FALSE;
10740 }
10741
10742 /* Fall through. */
10743 case R_ARM_ABS32:
10744 case R_ARM_ABS32_NOI:
10745 case R_ARM_REL32:
10746 case R_ARM_REL32_NOI:
10747 case R_ARM_MOVW_PREL_NC:
10748 case R_ARM_MOVT_PREL:
10749 case R_ARM_THM_MOVW_PREL_NC:
10750 case R_ARM_THM_MOVT_PREL:
10751 needs_plt = 0;
10752 normal_reloc:
10753
10754 /* Should the interworking branches be listed here? */
10755 if (h != NULL)
10756 {
10757 /* If this reloc is in a read-only section, we might
10758 need a copy reloc. We can't check reliably at this
10759 stage whether the section is read-only, as input
10760 sections have not yet been mapped to output sections.
10761 Tentatively set the flag for now, and correct in
10762 adjust_dynamic_symbol. */
10763 if (!info->shared)
10764 h->non_got_ref = 1;
10765
10766 /* We may need a .plt entry if the function this reloc
10767 refers to is in a different object. We can't tell for
10768 sure yet, because something later might force the
10769 symbol local. */
10770 if (needs_plt)
10771 h->needs_plt = 1;
10772
10773 /* If we create a PLT entry, this relocation will reference
10774 it, even if it's an ABS32 relocation. */
10775 h->plt.refcount += 1;
10776
10777 /* It's too early to use htab->use_blx here, so we have to
10778 record possible blx references separately from
10779 relocs that definitely need a thumb stub. */
10780
10781 if (r_type == R_ARM_THM_CALL)
10782 eh->plt_maybe_thumb_refcount += 1;
10783
10784 if (r_type == R_ARM_THM_JUMP24
10785 || r_type == R_ARM_THM_JUMP19)
10786 eh->plt_thumb_refcount += 1;
10787 }
10788
10789 /* If we are creating a shared library or relocatable executable,
10790 and this is a reloc against a global symbol, or a non PC
10791 relative reloc against a local symbol, then we need to copy
10792 the reloc into the shared library. However, if we are linking
10793 with -Bsymbolic, we do not need to copy a reloc against a
10794 global symbol which is defined in an object we are
10795 including in the link (i.e., DEF_REGULAR is set). At
10796 this point we have not seen all the input files, so it is
10797 possible that DEF_REGULAR is not set now but will be set
10798 later (it is never cleared). We account for that
10799 possibility below by storing information in the
10800 relocs_copied field of the hash table entry. */
10801 if ((info->shared || htab->root.is_relocatable_executable)
10802 && (sec->flags & SEC_ALLOC) != 0
10803 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10804 || (h != NULL && ! h->needs_plt
10805 && (! info->symbolic || ! h->def_regular))))
10806 {
10807 struct elf32_arm_relocs_copied *p, **head;
10808
10809 /* When creating a shared object, we must copy these
10810 reloc types into the output file. We create a reloc
10811 section in dynobj and make room for this reloc. */
10812 if (sreloc == NULL)
10813 {
10814 sreloc = _bfd_elf_make_dynamic_reloc_section
10815 (sec, dynobj, 2, abfd, ! htab->use_rel);
10816
10817 if (sreloc == NULL)
10818 return FALSE;
10819
10820 /* BPABI objects never have dynamic relocations mapped. */
10821 if (htab->symbian_p)
10822 {
10823 flagword flags;
10824
10825 flags = bfd_get_section_flags (dynobj, sreloc);
10826 flags &= ~(SEC_LOAD | SEC_ALLOC);
10827 bfd_set_section_flags (dynobj, sreloc, flags);
10828 }
10829 }
10830
10831 /* If this is a global symbol, we count the number of
10832 relocations we need for this symbol. */
10833 if (h != NULL)
10834 {
10835 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10836 }
10837 else
10838 {
10839 /* Track dynamic relocs needed for local syms too.
10840 We really need local syms available to do this
10841 easily. Oh well. */
10842
10843 asection *s;
10844 void *vpp;
10845
10846 s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
10847 sec, r_symndx);
10848 if (s == NULL)
10849 return FALSE;
10850
10851 vpp = &elf_section_data (s)->local_dynrel;
10852 head = (struct elf32_arm_relocs_copied **) vpp;
10853 }
10854
10855 p = *head;
10856 if (p == NULL || p->section != sec)
10857 {
10858 bfd_size_type amt = sizeof *p;
10859
10860 p = bfd_alloc (htab->root.dynobj, amt);
10861 if (p == NULL)
10862 return FALSE;
10863 p->next = *head;
10864 *head = p;
10865 p->section = sec;
10866 p->count = 0;
10867 p->pc_count = 0;
10868 }
10869
10870 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10871 p->pc_count += 1;
10872 p->count += 1;
10873 }
10874 break;
10875
10876 /* This relocation describes the C++ object vtable hierarchy.
10877 Reconstruct it for later use during GC. */
10878 case R_ARM_GNU_VTINHERIT:
10879 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10880 return FALSE;
10881 break;
10882
10883 /* This relocation describes which C++ vtable entries are actually
10884 used. Record for later use during GC. */
10885 case R_ARM_GNU_VTENTRY:
10886 BFD_ASSERT (h != NULL);
10887 if (h != NULL
10888 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10889 return FALSE;
10890 break;
10891 }
10892 }
10893
10894 return TRUE;
10895 }
10896
10897 /* Unwinding tables are not referenced directly. This pass marks them as
10898 required if the corresponding code section is marked. */
10899
10900 static bfd_boolean
10901 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10902 elf_gc_mark_hook_fn gc_mark_hook)
10903 {
10904 bfd *sub;
10905 Elf_Internal_Shdr **elf_shdrp;
10906 bfd_boolean again;
10907
10908 /* Marking EH data may cause additional code sections to be marked,
10909 requiring multiple passes. */
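  /* An SHT_ARM_EXIDX section's sh_link holds the section index of the
     code section it unwinds, so each pass marks any still-unmarked
     EXIDX section whose associated code section has become marked. */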
10910 again = TRUE;
10911 while (again)
10912 {
10913 again = FALSE;
10914 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10915 {
10916 asection *o;
10917
10918 if (! is_arm_elf (sub))
10919 continue;
10920
10921 elf_shdrp = elf_elfsections (sub);
10922 for (o = sub->sections; o != NULL; o = o->next)
10923 {
10924 Elf_Internal_Shdr *hdr;
10925
10926 hdr = &elf_section_data (o)->this_hdr;
10927 if (hdr->sh_type == SHT_ARM_EXIDX
10928 && hdr->sh_link
10929 && hdr->sh_link < elf_numsections (sub)
10930 && !o->gc_mark
10931 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10932 {
10933 again = TRUE;
10934 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10935 return FALSE;
10936 }
10937 }
10938 }
10939 }
10940
10941 return TRUE;
10942 }
10943
10944 /* Treat mapping symbols as special target symbols. */
10945
10946 static bfd_boolean
10947 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10948 {
10949 return bfd_is_arm_special_symbol_name (sym->name,
10950 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10951 }
10952
10953 /* This is a copy of elf_find_function() from elf.c except that
10954 ARM mapping symbols are ignored when looking for function names
10955 and STT_ARM_TFUNC is considered to be a function type. */
10956
10957 static bfd_boolean
10958 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10959 asection * section,
10960 asymbol ** symbols,
10961 bfd_vma offset,
10962 const char ** filename_ptr,
10963 const char ** functionname_ptr)
10964 {
10965 const char * filename = NULL;
10966 asymbol * func = NULL;
10967 bfd_vma low_func = 0;
10968 asymbol ** p;
10969
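  /* Scan all symbols, remembering the function-like (or untyped)
     symbol with the highest value that does not exceed OFFSET within
     SECTION; STT_FILE symbols update the file name as the scan
     proceeds. */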
10970 for (p = symbols; *p != NULL; p++)
10971 {
10972 elf_symbol_type *q;
10973
10974 q = (elf_symbol_type *) *p;
10975
10976 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
10977 {
10978 default:
10979 break;
10980 case STT_FILE:
10981 filename = bfd_asymbol_name (&q->symbol);
10982 break;
10983 case STT_FUNC:
10984 case STT_ARM_TFUNC:
10985 case STT_NOTYPE:
10986 /* Skip mapping symbols. */
10987 if ((q->symbol.flags & BSF_LOCAL)
10988 && bfd_is_arm_special_symbol_name (q->symbol.name,
10989 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
10990 continue;
10991 /* Fall through. */
10992 if (bfd_get_section (&q->symbol) == section
10993 && q->symbol.value >= low_func
10994 && q->symbol.value <= offset)
10995 {
10996 func = (asymbol *) q;
10997 low_func = q->symbol.value;
10998 }
10999 break;
11000 }
11001 }
11002
11003 if (func == NULL)
11004 return FALSE;
11005
11006 if (filename_ptr)
11007 *filename_ptr = filename;
11008 if (functionname_ptr)
11009 *functionname_ptr = bfd_asymbol_name (func);
11010
11011 return TRUE;
11012 }
11013
11014
11015 /* Find the nearest line to a particular section and offset, for error
11016 reporting. This code is a duplicate of the code in elf.c, except
11017 that it uses arm_elf_find_function. */
11018
11019 static bfd_boolean
11020 elf32_arm_find_nearest_line (bfd * abfd,
11021 asection * section,
11022 asymbol ** symbols,
11023 bfd_vma offset,
11024 const char ** filename_ptr,
11025 const char ** functionname_ptr,
11026 unsigned int * line_ptr)
11027 {
11028 bfd_boolean found = FALSE;
11029
11030 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11031
11032 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11033 filename_ptr, functionname_ptr,
11034 line_ptr, 0,
11035 & elf_tdata (abfd)->dwarf2_find_line_info))
11036 {
11037 if (!*functionname_ptr)
11038 arm_elf_find_function (abfd, section, symbols, offset,
11039 *filename_ptr ? NULL : filename_ptr,
11040 functionname_ptr);
11041
11042 return TRUE;
11043 }
11044
11045 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11046 & found, filename_ptr,
11047 functionname_ptr, line_ptr,
11048 & elf_tdata (abfd)->line_info))
11049 return FALSE;
11050
11051 if (found && (*functionname_ptr || *line_ptr))
11052 return TRUE;
11053
11054 if (symbols == NULL)
11055 return FALSE;
11056
11057 if (! arm_elf_find_function (abfd, section, symbols, offset,
11058 filename_ptr, functionname_ptr))
11059 return FALSE;
11060
11061 *line_ptr = 0;
11062 return TRUE;
11063 }
11064
11065 static bfd_boolean
11066 elf32_arm_find_inliner_info (bfd * abfd,
11067 const char ** filename_ptr,
11068 const char ** functionname_ptr,
11069 unsigned int * line_ptr)
11070 {
11071 bfd_boolean found;
11072 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11073 functionname_ptr, line_ptr,
11074 & elf_tdata (abfd)->dwarf2_find_line_info);
11075 return found;
11076 }
11077
11078 /* Adjust a symbol defined by a dynamic object and referenced by a
11079 regular object. The current definition is in some section of the
11080 dynamic object, but we're not including those sections. We have to
11081 change the definition to something the rest of the link can
11082 understand. */
11083
11084 static bfd_boolean
11085 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11086 struct elf_link_hash_entry * h)
11087 {
11088 bfd * dynobj;
11089 asection * s;
11090 struct elf32_arm_link_hash_entry * eh;
11091 struct elf32_arm_link_hash_table *globals;
11092
11093 globals = elf32_arm_hash_table (info);
11094 dynobj = elf_hash_table (info)->dynobj;
11095
11096 /* Make sure we know what is going on here. */
11097 BFD_ASSERT (dynobj != NULL
11098 && (h->needs_plt
11099 || h->u.weakdef != NULL
11100 || (h->def_dynamic
11101 && h->ref_regular
11102 && !h->def_regular)));
11103
11104 eh = (struct elf32_arm_link_hash_entry *) h;
11105
11106 /* If this is a function, put it in the procedure linkage table. We
11107 will fill in the contents of the procedure linkage table later,
11108 when we know the address of the .got section. */
11109 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11110 || h->needs_plt)
11111 {
11112 if (h->plt.refcount <= 0
11113 || SYMBOL_CALLS_LOCAL (info, h)
11114 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11115 && h->root.type == bfd_link_hash_undefweak))
11116 {
11117 /* This case can occur if we saw a PLT32 reloc in an input
11118 file, but the symbol was never referred to by a dynamic
11119 object, or if all references were garbage collected. In
11120 such a case, we don't actually need to build a procedure
11121 linkage table, and we can just do a PC24 reloc instead. */
11122 h->plt.offset = (bfd_vma) -1;
11123 eh->plt_thumb_refcount = 0;
11124 eh->plt_maybe_thumb_refcount = 0;
11125 h->needs_plt = 0;
11126 }
11127
11128 return TRUE;
11129 }
11130 else
11131 {
11132 /* It's possible that we incorrectly decided a .plt reloc was
11133 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11134 in check_relocs. We can't decide accurately between function
11135 and non-function syms in check_relocs; objects loaded later in
11136 the link may change h->type. So fix it now. */
11137 h->plt.offset = (bfd_vma) -1;
11138 eh->plt_thumb_refcount = 0;
11139 eh->plt_maybe_thumb_refcount = 0;
11140 }
11141
11142 /* If this is a weak symbol, and there is a real definition, the
11143 processor independent code will have arranged for us to see the
11144 real definition first, and we can just use the same value. */
11145 if (h->u.weakdef != NULL)
11146 {
11147 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11148 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11149 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11150 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11151 return TRUE;
11152 }
11153
11154 /* If there are no non-GOT references, we do not need a copy
11155 relocation. */
11156 if (!h->non_got_ref)
11157 return TRUE;
11158
11159 /* This is a reference to a symbol defined by a dynamic object which
11160 is not a function. */
11161
11162 /* If we are creating a shared library, we must presume that the
11163 only references to the symbol are via the global offset table.
11164 For such cases we need not do anything here; the relocations will
11165 be handled correctly by relocate_section. Relocatable executables
11166 can reference data in shared objects directly, so we don't need to
11167 do anything here. */
11168 if (info->shared || globals->root.is_relocatable_executable)
11169 return TRUE;
11170
11171 if (h->size == 0)
11172 {
11173 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11174 h->root.root.string);
11175 return TRUE;
11176 }
11177
11178 /* We must allocate the symbol in our .dynbss section, which will
11179 become part of the .bss section of the executable. There will be
11180 an entry for this symbol in the .dynsym section. The dynamic
11181 object will contain position independent code, so all references
11182 from the dynamic object to this symbol will go through the global
11183 offset table. The dynamic linker will use the .dynsym entry to
11184 determine the address it must put in the global offset table, so
11185 both the dynamic object and the regular object will refer to the
11186 same memory location for the variable. */
11187 s = bfd_get_section_by_name (dynobj, ".dynbss");
11188 BFD_ASSERT (s != NULL);
11189
11190 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11191 copy the initial value out of the dynamic object and into the
11192 runtime process image. We need to remember the offset into the
11193 .rel(a).bss section we are going to use. */
11194 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11195 {
11196 asection *srel;
11197
11198 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11199 BFD_ASSERT (srel != NULL);
11200 srel->size += RELOC_SIZE (globals);
11201 h->needs_copy = 1;
11202 }
11203
11204 return _bfd_elf_adjust_dynamic_copy (h, s);
11205 }
11206
11207 /* Allocate space in .plt, .got and associated reloc sections for
11208 dynamic relocs. */
11209
11210 static bfd_boolean
11211 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11212 {
11213 struct bfd_link_info *info;
11214 struct elf32_arm_link_hash_table *htab;
11215 struct elf32_arm_link_hash_entry *eh;
11216 struct elf32_arm_relocs_copied *p;
11217 bfd_signed_vma thumb_refs;
11218
11219 eh = (struct elf32_arm_link_hash_entry *) h;
11220
11221 if (h->root.type == bfd_link_hash_indirect)
11222 return TRUE;
11223
11224 if (h->root.type == bfd_link_hash_warning)
11225 /* When warning symbols are created, they **replace** the "real"
11226 entry in the hash table, thus we never get to see the real
11227 symbol in a hash traversal. So look at it now. */
11228 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11229
11230 info = (struct bfd_link_info *) inf;
11231 htab = elf32_arm_hash_table (info);
11232
11233 if (htab->root.dynamic_sections_created
11234 && h->plt.refcount > 0)
11235 {
11236 /* Make sure this symbol is output as a dynamic symbol.
11237 Undefined weak syms won't yet be marked as dynamic. */
11238 if (h->dynindx == -1
11239 && !h->forced_local)
11240 {
11241 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11242 return FALSE;
11243 }
11244
11245 if (info->shared
11246 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11247 {
11248 asection *s = htab->splt;
11249
11250 /* If this is the first .plt entry, make room for the special
11251 first entry. */
11252 if (s->size == 0)
11253 s->size += htab->plt_header_size;
11254
11255 h->plt.offset = s->size;
11256
11257 /* If we will insert a Thumb trampoline before this PLT, leave room
11258 for it. */
11259 thumb_refs = eh->plt_thumb_refcount;
11260 if (!htab->use_blx)
11261 thumb_refs += eh->plt_maybe_thumb_refcount;
11262
11263 if (thumb_refs > 0)
11264 {
11265 h->plt.offset += PLT_THUMB_STUB_SIZE;
11266 s->size += PLT_THUMB_STUB_SIZE;
11267 }
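/* The stub reserved here sits immediately before the ARM PLT entry:
   roughly a "bx pc" plus a padding nop that switches to ARM state so
   that a Thumb BL can reach the PLT on targets without BLX.
   h->plt.offset keeps pointing at the ARM part of the entry; the stub
   lives PLT_THUMB_STUB_SIZE bytes before it (see
   elf32_arm_finish_dynamic_symbol).  */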
11268
11269 /* If this symbol is not defined in a regular file, and we are
11270 not generating a shared library, then set the symbol to this
11271 location in the .plt. This is required to make function
11272 pointers compare as equal between the normal executable and
11273 the shared library. */
11274 if (! info->shared
11275 && !h->def_regular)
11276 {
11277 h->root.u.def.section = s;
11278 h->root.u.def.value = h->plt.offset;
11279
11280 /* Make sure the function is not marked as Thumb, in case
11281 it is the target of an ABS32 relocation, which will
11282 point to the PLT entry. */
11283 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11284 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11285 }
11286
11287 /* Make room for this entry. */
11288 s->size += htab->plt_entry_size;
11289
11290 if (!htab->symbian_p)
11291 {
11292 /* We also need to make an entry in the .got.plt section, which
11293 will be placed in the .got section by the linker script. */
11294 eh->plt_got_offset = htab->sgotplt->size;
11295 htab->sgotplt->size += 4;
11296 }
11297
11298 /* We also need to make an entry in the .rel(a).plt section. */
11299 htab->srelplt->size += RELOC_SIZE (htab);
11300
11301 /* VxWorks executables have a second set of relocations for
11302 each PLT entry. They go in a separate relocation section,
11303 which is processed by the kernel loader. */
11304 if (htab->vxworks_p && !info->shared)
11305 {
11306 /* There is a relocation for the initial PLT entry:
11307 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11308 if (h->plt.offset == htab->plt_header_size)
11309 htab->srelplt2->size += RELOC_SIZE (htab);
11310
11311 /* There are two extra relocations for each subsequent
11312 PLT entry: an R_ARM_32 relocation for the GOT entry,
11313 and an R_ARM_32 relocation for the PLT entry. */
11314 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
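/* With this accounting .rel(a).plt.unloaded ends up holding one
   relocation for the PLT header followed by two per PLT entry, which
   matches the (plt_index * 2 + 1) indexing used when the section is
   filled in by elf32_arm_finish_dynamic_symbol.  */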
11315 }
11316 }
11317 else
11318 {
11319 h->plt.offset = (bfd_vma) -1;
11320 h->needs_plt = 0;
11321 }
11322 }
11323 else
11324 {
11325 h->plt.offset = (bfd_vma) -1;
11326 h->needs_plt = 0;
11327 }
11328
11329 if (h->got.refcount > 0)
11330 {
11331 asection *s;
11332 bfd_boolean dyn;
11333 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11334 int indx;
11335
11336 /* Make sure this symbol is output as a dynamic symbol.
11337 Undefined weak syms won't yet be marked as dynamic. */
11338 if (h->dynindx == -1
11339 && !h->forced_local)
11340 {
11341 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11342 return FALSE;
11343 }
11344
11345 if (!htab->symbian_p)
11346 {
11347 s = htab->sgot;
11348 h->got.offset = s->size;
11349
11350 if (tls_type == GOT_UNKNOWN)
11351 abort ();
11352
11353 if (tls_type == GOT_NORMAL)
11354 /* Non-TLS symbols need one GOT slot. */
11355 s->size += 4;
11356 else
11357 {
11358 if (tls_type & GOT_TLS_GD)
11359 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11360 s->size += 8;
11361 if (tls_type & GOT_TLS_IE)
11362 /* R_ARM_TLS_IE32 needs one GOT slot. */
11363 s->size += 4;
11364 }
11365
11366 dyn = htab->root.dynamic_sections_created;
11367
11368 indx = 0;
11369 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11370 && (!info->shared
11371 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11372 indx = h->dynindx;
11373
11374 if (tls_type != GOT_NORMAL
11375 && (info->shared || indx != 0)
11376 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11377 || h->root.type != bfd_link_hash_undefweak))
11378 {
11379 if (tls_type & GOT_TLS_IE)
11380 htab->srelgot->size += RELOC_SIZE (htab);
11381
11382 if (tls_type & GOT_TLS_GD)
11383 htab->srelgot->size += RELOC_SIZE (htab);
11384
11385 if ((tls_type & GOT_TLS_GD) && indx != 0)
11386 htab->srelgot->size += RELOC_SIZE (htab);
11387 }
11388 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11389 || h->root.type != bfd_link_hash_undefweak)
11390 && (info->shared
11391 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11392 htab->srelgot->size += RELOC_SIZE (htab);
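/* To summarise the accounting above: a GOT_TLS_IE slot gets one
   dynamic relocation (for the thread-pointer offset), a GOT_TLS_GD
   pair gets one for the module index plus a second one for the offset
   word when the symbol is dynamic, and an ordinary GOT slot gets a
   single R_ARM_GLOB_DAT (or R_ARM_RELATIVE) relocation when one is
   needed at run time.  */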
11393 }
11394 }
11395 else
11396 h->got.offset = (bfd_vma) -1;
11397
11398 /* Allocate stubs for exported Thumb functions on v4t. */
11399 if (!htab->use_blx && h->dynindx != -1
11400 && h->def_regular
11401 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11402 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11403 {
11404 struct elf_link_hash_entry * th;
11405 struct bfd_link_hash_entry * bh;
11406 struct elf_link_hash_entry * myh;
11407 char name[1024];
11408 asection *s;
11409 bh = NULL;
11410 /* Create a new symbol to register the real location of the function. */
11411 s = h->root.u.def.section;
11412 sprintf (name, "__real_%s", h->root.root.string);
11413 _bfd_generic_link_add_one_symbol (info, s->owner,
11414 name, BSF_GLOBAL, s,
11415 h->root.u.def.value,
11416 NULL, TRUE, FALSE, &bh);
11417
11418 myh = (struct elf_link_hash_entry *) bh;
11419 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11420 myh->forced_local = 1;
11421 eh->export_glue = myh;
11422 th = record_arm_to_thumb_glue (info, h);
11423 /* Point the symbol at the stub. */
11424 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11425 h->root.u.def.section = th->root.u.def.section;
11426 h->root.u.def.value = th->root.u.def.value & ~1;
11427 }
11428
11429 if (eh->relocs_copied == NULL)
11430 return TRUE;
11431
11432 /* In the shared -Bsymbolic case, discard space allocated for
11433 dynamic pc-relative relocs against symbols which turn out to be
11434 defined in regular objects. For the normal shared case, discard
11435 space for pc-relative relocs that have become local due to symbol
11436 visibility changes. */
11437
11438 if (info->shared || htab->root.is_relocatable_executable)
11439 {
11440 /* The only relocs that use pc_count are R_ARM_REL32 and
11441 R_ARM_REL32_NOI, which will appear on something like
11442 ".long foo - .". We want calls to protected symbols to resolve
11443 directly to the function rather than going via the plt. If people
11444 want function pointer comparisons to work as expected then they
11445 should avoid writing assembly like ".long foo - .". */
11446 if (SYMBOL_CALLS_LOCAL (info, h))
11447 {
11448 struct elf32_arm_relocs_copied **pp;
11449
11450 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11451 {
11452 p->count -= p->pc_count;
11453 p->pc_count = 0;
11454 if (p->count == 0)
11455 *pp = p->next;
11456 else
11457 pp = &p->next;
11458 }
11459 }
11460
11461 if (elf32_arm_hash_table (info)->vxworks_p)
11462 {
11463 struct elf32_arm_relocs_copied **pp;
11464
11465 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11466 {
11467 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11468 *pp = p->next;
11469 else
11470 pp = &p->next;
11471 }
11472 }
11473
11474 /* Also discard relocs on undefined weak syms with non-default
11475 visibility. */
11476 if (eh->relocs_copied != NULL
11477 && h->root.type == bfd_link_hash_undefweak)
11478 {
11479 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11480 eh->relocs_copied = NULL;
11481
11482 /* Make sure undefined weak symbols are output as a dynamic
11483 symbol in PIEs. */
11484 else if (h->dynindx == -1
11485 && !h->forced_local)
11486 {
11487 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11488 return FALSE;
11489 }
11490 }
11491
11492 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11493 && h->root.type == bfd_link_hash_new)
11494 {
11495 /* Output absolute symbols so that we can create relocations
11496 against them. For normal symbols we output a relocation
11497 against the section that contains them. */
11498 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11499 return FALSE;
11500 }
11501
11502 }
11503 else
11504 {
11505 /* For the non-shared case, discard space for relocs against
11506 symbols which turn out to need copy relocs or are not
11507 dynamic. */
11508
11509 if (!h->non_got_ref
11510 && ((h->def_dynamic
11511 && !h->def_regular)
11512 || (htab->root.dynamic_sections_created
11513 && (h->root.type == bfd_link_hash_undefweak
11514 || h->root.type == bfd_link_hash_undefined))))
11515 {
11516 /* Make sure this symbol is output as a dynamic symbol.
11517 Undefined weak syms won't yet be marked as dynamic. */
11518 if (h->dynindx == -1
11519 && !h->forced_local)
11520 {
11521 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11522 return FALSE;
11523 }
11524
11525 /* If that succeeded, we know we'll be keeping all the
11526 relocs. */
11527 if (h->dynindx != -1)
11528 goto keep;
11529 }
11530
11531 eh->relocs_copied = NULL;
11532
11533 keep: ;
11534 }
11535
11536 /* Finally, allocate space. */
11537 for (p = eh->relocs_copied; p != NULL; p = p->next)
11538 {
11539 asection *sreloc = elf_section_data (p->section)->sreloc;
11540 sreloc->size += p->count * RELOC_SIZE (htab);
11541 }
11542
11543 return TRUE;
11544 }
11545
11546 /* Find any dynamic relocs that apply to read-only sections. */
11547
11548 static bfd_boolean
11549 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11550 {
11551 struct elf32_arm_link_hash_entry * eh;
11552 struct elf32_arm_relocs_copied * p;
11553
11554 if (h->root.type == bfd_link_hash_warning)
11555 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11556
11557 eh = (struct elf32_arm_link_hash_entry *) h;
11558 for (p = eh->relocs_copied; p != NULL; p = p->next)
11559 {
11560 asection *s = p->section;
11561
11562 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11563 {
11564 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11565
11566 info->flags |= DF_TEXTREL;
11567
11568 /* Not an error, just cut short the traversal. */
11569 return FALSE;
11570 }
11571 }
11572 return TRUE;
11573 }
11574
11575 void
11576 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11577 int byteswap_code)
11578 {
11579 struct elf32_arm_link_hash_table *globals;
11580
11581 globals = elf32_arm_hash_table (info);
11582 globals->byteswap_code = byteswap_code;
11583 }
11584
11585 /* Set the sizes of the dynamic sections. */
11586
11587 static bfd_boolean
11588 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11589 struct bfd_link_info * info)
11590 {
11591 bfd * dynobj;
11592 asection * s;
11593 bfd_boolean plt;
11594 bfd_boolean relocs;
11595 bfd *ibfd;
11596 struct elf32_arm_link_hash_table *htab;
11597
11598 htab = elf32_arm_hash_table (info);
11599 dynobj = elf_hash_table (info)->dynobj;
11600 BFD_ASSERT (dynobj != NULL);
11601 check_use_blx (htab);
11602
11603 if (elf_hash_table (info)->dynamic_sections_created)
11604 {
11605 /* Set the contents of the .interp section to the interpreter. */
11606 if (info->executable)
11607 {
11608 s = bfd_get_section_by_name (dynobj, ".interp");
11609 BFD_ASSERT (s != NULL);
11610 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11611 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11612 }
11613 }
11614
11615 /* Set up .got offsets for local syms, and space for local dynamic
11616 relocs. */
11617 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11618 {
11619 bfd_signed_vma *local_got;
11620 bfd_signed_vma *end_local_got;
11621 char *local_tls_type;
11622 bfd_size_type locsymcount;
11623 Elf_Internal_Shdr *symtab_hdr;
11624 asection *srel;
11625 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11626
11627 if (! is_arm_elf (ibfd))
11628 continue;
11629
11630 for (s = ibfd->sections; s != NULL; s = s->next)
11631 {
11632 struct elf32_arm_relocs_copied *p;
11633
11634 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11635 {
11636 if (!bfd_is_abs_section (p->section)
11637 && bfd_is_abs_section (p->section->output_section))
11638 {
11639 /* Input section has been discarded, either because
11640 it is a copy of a linkonce section or due to
11641 linker script /DISCARD/, so we'll be discarding
11642 the relocs too. */
11643 }
11644 else if (is_vxworks
11645 && strcmp (p->section->output_section->name,
11646 ".tls_vars") == 0)
11647 {
11648 /* Relocations in vxworks .tls_vars sections are
11649 handled specially by the loader. */
11650 }
11651 else if (p->count != 0)
11652 {
11653 srel = elf_section_data (p->section)->sreloc;
11654 srel->size += p->count * RELOC_SIZE (htab);
11655 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11656 info->flags |= DF_TEXTREL;
11657 }
11658 }
11659 }
11660
11661 local_got = elf_local_got_refcounts (ibfd);
11662 if (!local_got)
11663 continue;
11664
11665 symtab_hdr = & elf_symtab_hdr (ibfd);
11666 locsymcount = symtab_hdr->sh_info;
11667 end_local_got = local_got + locsymcount;
11668 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11669 s = htab->sgot;
11670 srel = htab->srelgot;
11671 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11672 {
11673 if (*local_got > 0)
11674 {
11675 *local_got = s->size;
11676 if (*local_tls_type & GOT_TLS_GD)
11677 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11678 s->size += 8;
11679 if (*local_tls_type & GOT_TLS_IE)
11680 s->size += 4;
11681 if (*local_tls_type == GOT_NORMAL)
11682 s->size += 4;
11683
11684 if (info->shared || *local_tls_type == GOT_TLS_GD)
11685 srel->size += RELOC_SIZE (htab);
11686 }
11687 else
11688 *local_got = (bfd_vma) -1;
11689 }
11690 }
11691
11692 if (htab->tls_ldm_got.refcount > 0)
11693 {
11694 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11695 for R_ARM_TLS_LDM32 relocations. */
11696 htab->tls_ldm_got.offset = htab->sgot->size;
11697 htab->sgot->size += 8;
11698 if (info->shared)
11699 htab->srelgot->size += RELOC_SIZE (htab);
11700 }
11701 else
11702 htab->tls_ldm_got.offset = -1;
11703
11704 /* Allocate global sym .plt and .got entries, and space for global
11705 sym dynamic relocs. */
11706 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11707
11708 /* Here we rummage through the found bfds to collect glue information. */
11709 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11710 {
11711 if (! is_arm_elf (ibfd))
11712 continue;
11713
11714 /* Initialise mapping tables for code/data. */
11715 bfd_elf32_arm_init_maps (ibfd);
11716
11717 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11718 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11719 /* xgettext:c-format */
11720 _bfd_error_handler (_("Errors encountered processing file %s"),
11721 ibfd->filename);
11722 }
11723
11724 /* Allocate space for the glue sections now that we've sized them. */
11725 bfd_elf32_arm_allocate_interworking_sections (info);
11726
11727 /* The check_relocs and adjust_dynamic_symbol entry points have
11728 determined the sizes of the various dynamic sections. Allocate
11729 memory for them. */
11730 plt = FALSE;
11731 relocs = FALSE;
11732 for (s = dynobj->sections; s != NULL; s = s->next)
11733 {
11734 const char * name;
11735
11736 if ((s->flags & SEC_LINKER_CREATED) == 0)
11737 continue;
11738
11739 /* It's OK to base decisions on the section name, because none
11740 of the dynobj section names depend upon the input files. */
11741 name = bfd_get_section_name (dynobj, s);
11742
11743 if (strcmp (name, ".plt") == 0)
11744 {
11745 /* Remember whether there is a PLT. */
11746 plt = s->size != 0;
11747 }
11748 else if (CONST_STRNEQ (name, ".rel"))
11749 {
11750 if (s->size != 0)
11751 {
11752 /* Remember whether there are any reloc sections other
11753 than .rel(a).plt and .rela.plt.unloaded. */
11754 if (s != htab->srelplt && s != htab->srelplt2)
11755 relocs = TRUE;
11756
11757 /* We use the reloc_count field as a counter if we need
11758 to copy relocs into the output file. */
11759 s->reloc_count = 0;
11760 }
11761 }
11762 else if (! CONST_STRNEQ (name, ".got")
11763 && strcmp (name, ".dynbss") != 0)
11764 {
11765 /* It's not one of our sections, so don't allocate space. */
11766 continue;
11767 }
11768
11769 if (s->size == 0)
11770 {
11771 /* If we don't need this section, strip it from the
11772 output file. This is mostly to handle .rel(a).bss and
11773 .rel(a).plt. We must create both sections in
11774 create_dynamic_sections, because they must be created
11775 before the linker maps input sections to output
11776 sections. The linker does that before
11777 adjust_dynamic_symbol is called, and it is that
11778 function which decides whether anything needs to go
11779 into these sections. */
11780 s->flags |= SEC_EXCLUDE;
11781 continue;
11782 }
11783
11784 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11785 continue;
11786
11787 /* Allocate memory for the section contents. */
11788 s->contents = bfd_zalloc (dynobj, s->size);
11789 if (s->contents == NULL)
11790 return FALSE;
11791 }
11792
11793 if (elf_hash_table (info)->dynamic_sections_created)
11794 {
11795 /* Add some entries to the .dynamic section. We fill in the
11796 values later, in elf32_arm_finish_dynamic_sections, but we
11797 must add the entries now so that we get the correct size for
11798 the .dynamic section. The DT_DEBUG entry is filled in by the
11799 dynamic linker and used by the debugger. */
11800 #define add_dynamic_entry(TAG, VAL) \
11801 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11802
11803 if (info->executable)
11804 {
11805 if (!add_dynamic_entry (DT_DEBUG, 0))
11806 return FALSE;
11807 }
11808
11809 if (plt)
11810 {
11811 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11812 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11813 || !add_dynamic_entry (DT_PLTREL,
11814 htab->use_rel ? DT_REL : DT_RELA)
11815 || !add_dynamic_entry (DT_JMPREL, 0))
11816 return FALSE;
11817 }
11818
11819 if (relocs)
11820 {
11821 if (htab->use_rel)
11822 {
11823 if (!add_dynamic_entry (DT_REL, 0)
11824 || !add_dynamic_entry (DT_RELSZ, 0)
11825 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11826 return FALSE;
11827 }
11828 else
11829 {
11830 if (!add_dynamic_entry (DT_RELA, 0)
11831 || !add_dynamic_entry (DT_RELASZ, 0)
11832 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11833 return FALSE;
11834 }
11835 }
11836
11837 /* If any dynamic relocs apply to a read-only section,
11838 then we need a DT_TEXTREL entry. */
11839 if ((info->flags & DF_TEXTREL) == 0)
11840 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11841 info);
11842
11843 if ((info->flags & DF_TEXTREL) != 0)
11844 {
11845 if (!add_dynamic_entry (DT_TEXTREL, 0))
11846 return FALSE;
11847 }
11848 if (htab->vxworks_p
11849 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11850 return FALSE;
11851 }
11852 #undef add_dynamic_entry
11853
11854 return TRUE;
11855 }
11856
11857 /* Finish up dynamic symbol handling. We set the contents of various
11858 dynamic sections here. */
11859
11860 static bfd_boolean
11861 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11862 struct bfd_link_info * info,
11863 struct elf_link_hash_entry * h,
11864 Elf_Internal_Sym * sym)
11865 {
11866 bfd * dynobj;
11867 struct elf32_arm_link_hash_table *htab;
11868 struct elf32_arm_link_hash_entry *eh;
11869
11870 dynobj = elf_hash_table (info)->dynobj;
11871 htab = elf32_arm_hash_table (info);
11872 eh = (struct elf32_arm_link_hash_entry *) h;
11873
11874 if (h->plt.offset != (bfd_vma) -1)
11875 {
11876 asection * splt;
11877 asection * srel;
11878 bfd_byte *loc;
11879 bfd_vma plt_index;
11880 Elf_Internal_Rela rel;
11881
11882 /* This symbol has an entry in the procedure linkage table. Set
11883 it up. */
11884
11885 BFD_ASSERT (h->dynindx != -1);
11886
11887 splt = bfd_get_section_by_name (dynobj, ".plt");
11888 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11889 BFD_ASSERT (splt != NULL && srel != NULL);
11890
11891 /* Fill in the entry in the procedure linkage table. */
11892 if (htab->symbian_p)
11893 {
11894 put_arm_insn (htab, output_bfd,
11895 elf32_arm_symbian_plt_entry[0],
11896 splt->contents + h->plt.offset);
11897 bfd_put_32 (output_bfd,
11898 elf32_arm_symbian_plt_entry[1],
11899 splt->contents + h->plt.offset + 4);
11900
11901 /* Fill in the entry in the .rel.plt section. */
11902 rel.r_offset = (splt->output_section->vma
11903 + splt->output_offset
11904 + h->plt.offset + 4);
11905 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11906
11907 /* Get the index in the procedure linkage table which
11908 corresponds to this symbol. This is the index of this symbol
11909 in all the symbols for which we are making plt entries. The
11910 first entry in the procedure linkage table is reserved. */
11911 plt_index = ((h->plt.offset - htab->plt_header_size)
11912 / htab->plt_entry_size);
11913 }
11914 else
11915 {
11916 bfd_vma got_offset, got_address, plt_address;
11917 bfd_vma got_displacement;
11918 asection * sgot;
11919 bfd_byte * ptr;
11920
11921 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11922 BFD_ASSERT (sgot != NULL);
11923
11924 /* Get the offset into the .got.plt table of the entry that
11925 corresponds to this function. */
11926 got_offset = eh->plt_got_offset;
11927
11928 /* Get the index in the procedure linkage table which
11929 corresponds to this symbol. This is the index of this symbol
11930 in all the symbols for which we are making plt entries. The
11931 first three entries in .got.plt are reserved; after that
11932 symbols appear in the same order as in .plt. */
11933 plt_index = (got_offset - 12) / 4;
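/* For example, the first symbol given a PLT entry has got_offset == 12,
   i.e. the word immediately after the three reserved .got.plt entries,
   and therefore plt_index == 0; each later symbol adds one 4-byte word
   and bumps plt_index by one.  */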
11934
11935 /* Calculate the address of the GOT entry. */
11936 got_address = (sgot->output_section->vma
11937 + sgot->output_offset
11938 + got_offset);
11939
11940 /* ...and the address of the PLT entry. */
11941 plt_address = (splt->output_section->vma
11942 + splt->output_offset
11943 + h->plt.offset);
11944
11945 ptr = htab->splt->contents + h->plt.offset;
11946 if (htab->vxworks_p && info->shared)
11947 {
11948 unsigned int i;
11949 bfd_vma val;
11950
11951 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11952 {
11953 val = elf32_arm_vxworks_shared_plt_entry[i];
11954 if (i == 2)
11955 val |= got_address - sgot->output_section->vma;
11956 if (i == 5)
11957 val |= plt_index * RELOC_SIZE (htab);
11958 if (i == 2 || i == 5)
11959 bfd_put_32 (output_bfd, val, ptr);
11960 else
11961 put_arm_insn (htab, output_bfd, val, ptr);
11962 }
11963 }
11964 else if (htab->vxworks_p)
11965 {
11966 unsigned int i;
11967 bfd_vma val;
11968
11969 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11970 {
11971 val = elf32_arm_vxworks_exec_plt_entry[i];
11972 if (i == 2)
11973 val |= got_address;
11974 if (i == 4)
11975 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
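/* The value OR'ed in above is a 24-bit branch offset, counted in words
   from the branch address plus 8, which resolves to offset 0 of .plt,
   i.e. a branch back to the PLT header.  */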
11976 if (i == 5)
11977 val |= plt_index * RELOC_SIZE (htab);
11978 if (i == 2 || i == 5)
11979 bfd_put_32 (output_bfd, val, ptr);
11980 else
11981 put_arm_insn (htab, output_bfd, val, ptr);
11982 }
11983
11984 loc = (htab->srelplt2->contents
11985 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
11986
11987 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
11988 referencing the GOT for this PLT entry. */
11989 rel.r_offset = plt_address + 8;
11990 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
11991 rel.r_addend = got_offset;
11992 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
11993 loc += RELOC_SIZE (htab);
11994
11995 /* Create the R_ARM_ABS32 relocation referencing the
11996 beginning of the PLT for this GOT entry. */
11997 rel.r_offset = got_address;
11998 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
11999 rel.r_addend = 0;
12000 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12001 }
12002 else
12003 {
12004 bfd_signed_vma thumb_refs;
12005 /* Calculate the displacement between the PLT slot and the
12006 entry in the GOT. The eight-byte offset accounts for the
12007 value produced by adding to pc in the first instruction
12008 of the PLT stub. */
12009 got_displacement = got_address - (plt_address + 8);
12010
12011 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
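/* The displacement has to fit in 28 bits because the code below splits
   it across the three immediate fields 0x0ff00000, 0x000ff000 and
   0x00000fff of the ARM PLT entry written here: two adds followed by a
   load into the pc through the GOT slot.  */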
12012
12013 thumb_refs = eh->plt_thumb_refcount;
12014 if (!htab->use_blx)
12015 thumb_refs += eh->plt_maybe_thumb_refcount;
12016
12017 if (thumb_refs > 0)
12018 {
12019 put_thumb_insn (htab, output_bfd,
12020 elf32_arm_plt_thumb_stub[0], ptr - 4);
12021 put_thumb_insn (htab, output_bfd,
12022 elf32_arm_plt_thumb_stub[1], ptr - 2);
12023 }
12024
12025 put_arm_insn (htab, output_bfd,
12026 elf32_arm_plt_entry[0]
12027 | ((got_displacement & 0x0ff00000) >> 20),
12028 ptr + 0);
12029 put_arm_insn (htab, output_bfd,
12030 elf32_arm_plt_entry[1]
12031 | ((got_displacement & 0x000ff000) >> 12),
12032 ptr + 4);
12033 put_arm_insn (htab, output_bfd,
12034 elf32_arm_plt_entry[2]
12035 | (got_displacement & 0x00000fff),
12036 ptr + 8);
12037 #ifdef FOUR_WORD_PLT
12038 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12039 #endif
12040 }
12041
12042 /* Fill in the entry in the global offset table. */
12043 bfd_put_32 (output_bfd,
12044 (splt->output_section->vma
12045 + splt->output_offset),
12046 sgot->contents + got_offset);
12047
12048 /* Fill in the entry in the .rel(a).plt section. */
12049 rel.r_addend = 0;
12050 rel.r_offset = got_address;
12051 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12052 }
12053
12054 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12055 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12056
12057 if (!h->def_regular)
12058 {
12059 /* Mark the symbol as undefined, rather than as defined in
12060 the .plt section. Leave the value alone. */
12061 sym->st_shndx = SHN_UNDEF;
12062 /* If the symbol is weak, we do need to clear the value.
12063 Otherwise, the PLT entry would provide a definition for
12064 the symbol even if the symbol wasn't defined anywhere,
12065 and so the symbol would never be NULL. */
12066 if (!h->ref_regular_nonweak)
12067 sym->st_value = 0;
12068 }
12069 }
12070
12071 if (h->got.offset != (bfd_vma) -1
12072 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12073 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12074 {
12075 asection * sgot;
12076 asection * srel;
12077 Elf_Internal_Rela rel;
12078 bfd_byte *loc;
12079 bfd_vma offset;
12080
12081 /* This symbol has an entry in the global offset table. Set it
12082 up. */
12083 sgot = bfd_get_section_by_name (dynobj, ".got");
12084 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12085 BFD_ASSERT (sgot != NULL && srel != NULL);
12086
12087 offset = (h->got.offset & ~(bfd_vma) 1);
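/* The low bit of h->got.offset is used as a flag, set in
   relocate_section, recording that the GOT slot has already been
   initialized; mask it off here to recover the real offset and check
   it in the BFD_ASSERTs below.  */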
12088 rel.r_addend = 0;
12089 rel.r_offset = (sgot->output_section->vma
12090 + sgot->output_offset
12091 + offset);
12092
12093 /* If this is a static link, or it is a -Bsymbolic link and the
12094 symbol is defined locally or was forced to be local because
12095 of a version file, we just want to emit a RELATIVE reloc.
12096 The entry in the global offset table will already have been
12097 initialized in the relocate_section function. */
12098 if (info->shared
12099 && SYMBOL_REFERENCES_LOCAL (info, h))
12100 {
12101 BFD_ASSERT ((h->got.offset & 1) != 0);
12102 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12103 if (!htab->use_rel)
12104 {
12105 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12106 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12107 }
12108 }
12109 else
12110 {
12111 BFD_ASSERT ((h->got.offset & 1) == 0);
12112 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12113 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12114 }
12115
12116 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12117 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12118 }
12119
12120 if (h->needs_copy)
12121 {
12122 asection * s;
12123 Elf_Internal_Rela rel;
12124 bfd_byte *loc;
12125
12126 /* This symbol needs a copy reloc. Set it up. */
12127 BFD_ASSERT (h->dynindx != -1
12128 && (h->root.type == bfd_link_hash_defined
12129 || h->root.type == bfd_link_hash_defweak));
12130
12131 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12132 RELOC_SECTION (htab, ".bss"));
12133 BFD_ASSERT (s != NULL);
12134
12135 rel.r_addend = 0;
12136 rel.r_offset = (h->root.u.def.value
12137 + h->root.u.def.section->output_section->vma
12138 + h->root.u.def.section->output_offset);
12139 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12140 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12141 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12142 }
12143
12144 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12145 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12146 to the ".got" section. */
12147 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12148 || (!htab->vxworks_p && h == htab->root.hgot))
12149 sym->st_shndx = SHN_ABS;
12150
12151 return TRUE;
12152 }
12153
12154 /* Finish up the dynamic sections. */
12155
12156 static bfd_boolean
12157 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12158 {
12159 bfd * dynobj;
12160 asection * sgot;
12161 asection * sdyn;
12162
12163 dynobj = elf_hash_table (info)->dynobj;
12164
12165 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12166 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12167 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12168
12169 if (elf_hash_table (info)->dynamic_sections_created)
12170 {
12171 asection *splt;
12172 Elf32_External_Dyn *dyncon, *dynconend;
12173 struct elf32_arm_link_hash_table *htab;
12174
12175 htab = elf32_arm_hash_table (info);
12176 splt = bfd_get_section_by_name (dynobj, ".plt");
12177 BFD_ASSERT (splt != NULL && sdyn != NULL);
12178
12179 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12180 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12181
12182 for (; dyncon < dynconend; dyncon++)
12183 {
12184 Elf_Internal_Dyn dyn;
12185 const char * name;
12186 asection * s;
12187
12188 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12189
12190 switch (dyn.d_tag)
12191 {
12192 unsigned int type;
12193
12194 default:
12195 if (htab->vxworks_p
12196 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12197 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12198 break;
12199
12200 case DT_HASH:
12201 name = ".hash";
12202 goto get_vma_if_bpabi;
12203 case DT_STRTAB:
12204 name = ".dynstr";
12205 goto get_vma_if_bpabi;
12206 case DT_SYMTAB:
12207 name = ".dynsym";
12208 goto get_vma_if_bpabi;
12209 case DT_VERSYM:
12210 name = ".gnu.version";
12211 goto get_vma_if_bpabi;
12212 case DT_VERDEF:
12213 name = ".gnu.version_d";
12214 goto get_vma_if_bpabi;
12215 case DT_VERNEED:
12216 name = ".gnu.version_r";
12217 goto get_vma_if_bpabi;
12218
12219 case DT_PLTGOT:
12220 name = ".got";
12221 goto get_vma;
12222 case DT_JMPREL:
12223 name = RELOC_SECTION (htab, ".plt");
12224 get_vma:
12225 s = bfd_get_section_by_name (output_bfd, name);
12226 BFD_ASSERT (s != NULL);
12227 if (!htab->symbian_p)
12228 dyn.d_un.d_ptr = s->vma;
12229 else
12230 /* In the BPABI, tags in the PT_DYNAMIC section point
12231 at the file offset, not the memory address, for the
12232 convenience of the post-linker. */
12233 dyn.d_un.d_ptr = s->filepos;
12234 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12235 break;
12236
12237 get_vma_if_bpabi:
12238 if (htab->symbian_p)
12239 goto get_vma;
12240 break;
12241
12242 case DT_PLTRELSZ:
12243 s = bfd_get_section_by_name (output_bfd,
12244 RELOC_SECTION (htab, ".plt"));
12245 BFD_ASSERT (s != NULL);
12246 dyn.d_un.d_val = s->size;
12247 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12248 break;
12249
12250 case DT_RELSZ:
12251 case DT_RELASZ:
12252 if (!htab->symbian_p)
12253 {
12254 /* My reading of the SVR4 ABI indicates that the
12255 procedure linkage table relocs (DT_JMPREL) should be
12256 included in the overall relocs (DT_REL). This is
12257 what Solaris does. However, UnixWare cannot handle
12258 that case. Therefore, we override the DT_RELSZ entry
12259 here to make it not include the JMPREL relocs. Since
12260 the linker script arranges for .rel(a).plt to follow all
12261 other relocation sections, we don't have to worry
12262 about changing the DT_REL entry. */
12263 s = bfd_get_section_by_name (output_bfd,
12264 RELOC_SECTION (htab, ".plt"));
12265 if (s != NULL)
12266 dyn.d_un.d_val -= s->size;
12267 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12268 break;
12269 }
12270 /* Fall through. */
12271
12272 case DT_REL:
12273 case DT_RELA:
12274 /* In the BPABI, the DT_REL tag must point at the file
12275 offset, not the VMA, of the first relocation
12276 section. So, we use code similar to that in
12277 elflink.c, but do not check for SHF_ALLOC on the
12278 relocation section, since relocation sections are
12279 never allocated under the BPABI. The comments above
12280 about UnixWare notwithstanding, we include all of the
12281 relocations here. */
12282 if (htab->symbian_p)
12283 {
12284 unsigned int i;
12285 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12286 ? SHT_REL : SHT_RELA);
12287 dyn.d_un.d_val = 0;
12288 for (i = 1; i < elf_numsections (output_bfd); i++)
12289 {
12290 Elf_Internal_Shdr *hdr
12291 = elf_elfsections (output_bfd)[i];
12292 if (hdr->sh_type == type)
12293 {
12294 if (dyn.d_tag == DT_RELSZ
12295 || dyn.d_tag == DT_RELASZ)
12296 dyn.d_un.d_val += hdr->sh_size;
12297 else if ((ufile_ptr) hdr->sh_offset
12298 <= dyn.d_un.d_val - 1)
12299 dyn.d_un.d_val = hdr->sh_offset;
12300 }
12301 }
12302 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12303 }
12304 break;
12305
12306 /* Set the bottom bit of DT_INIT/FINI if the
12307 corresponding function is Thumb. */
12308 case DT_INIT:
12309 name = info->init_function;
12310 goto get_sym;
12311 case DT_FINI:
12312 name = info->fini_function;
12313 get_sym:
12314 /* If it wasn't set by elf_bfd_final_link
12315 then there is nothing to adjust. */
12316 if (dyn.d_un.d_val != 0)
12317 {
12318 struct elf_link_hash_entry * eh;
12319
12320 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12321 FALSE, FALSE, TRUE);
12322 if (eh != NULL
12323 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12324 {
12325 dyn.d_un.d_val |= 1;
12326 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12327 }
12328 }
12329 break;
12330 }
12331 }
12332
12333 /* Fill in the first entry in the procedure linkage table. */
12334 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12335 {
12336 const bfd_vma *plt0_entry;
12337 bfd_vma got_address, plt_address, got_displacement;
12338
12339 /* Calculate the addresses of the GOT and PLT. */
12340 got_address = sgot->output_section->vma + sgot->output_offset;
12341 plt_address = splt->output_section->vma + splt->output_offset;
12342
12343 if (htab->vxworks_p)
12344 {
12345 /* The VxWorks GOT is relocated by the dynamic linker.
12346 Therefore, we must emit relocations rather than simply
12347 computing the values now. */
12348 Elf_Internal_Rela rel;
12349
12350 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12351 put_arm_insn (htab, output_bfd, plt0_entry[0],
12352 splt->contents + 0);
12353 put_arm_insn (htab, output_bfd, plt0_entry[1],
12354 splt->contents + 4);
12355 put_arm_insn (htab, output_bfd, plt0_entry[2],
12356 splt->contents + 8);
12357 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12358
12359 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12360 rel.r_offset = plt_address + 12;
12361 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12362 rel.r_addend = 0;
12363 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12364 htab->srelplt2->contents);
12365 }
12366 else
12367 {
12368 got_displacement = got_address - (plt_address + 16);
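/* For the default (non-FOUR_WORD_PLT) header the displacement is
   biased by 16 because the "add" at offset 8 reads the pc as its own
   address plus 8, i.e. plt_address + 16; adding the word stored below
   to that therefore yields the start of the GOT.  */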
12369
12370 plt0_entry = elf32_arm_plt0_entry;
12371 put_arm_insn (htab, output_bfd, plt0_entry[0],
12372 splt->contents + 0);
12373 put_arm_insn (htab, output_bfd, plt0_entry[1],
12374 splt->contents + 4);
12375 put_arm_insn (htab, output_bfd, plt0_entry[2],
12376 splt->contents + 8);
12377 put_arm_insn (htab, output_bfd, plt0_entry[3],
12378 splt->contents + 12);
12379
12380 #ifdef FOUR_WORD_PLT
12381 /* The displacement value goes in the otherwise-unused
12382 last word of the second entry. */
12383 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12384 #else
12385 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12386 #endif
12387 }
12388 }
12389
12390 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12391 really seem like the right value. */
12392 if (splt->output_section->owner == output_bfd)
12393 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12394
12395 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12396 {
12397 /* Correct the .rel(a).plt.unloaded relocations. They will have
12398 incorrect symbol indexes. */
12399 int num_plts;
12400 unsigned char *p;
12401
12402 num_plts = ((htab->splt->size - htab->plt_header_size)
12403 / htab->plt_entry_size);
12404 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12405
12406 for (; num_plts; num_plts--)
12407 {
12408 Elf_Internal_Rela rel;
12409
12410 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12411 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12412 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12413 p += RELOC_SIZE (htab);
12414
12415 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12416 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12417 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12418 p += RELOC_SIZE (htab);
12419 }
12420 }
12421 }
12422
12423 /* Fill in the first three entries in the global offset table. */
12424 if (sgot)
12425 {
12426 if (sgot->size > 0)
12427 {
12428 if (sdyn == NULL)
12429 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12430 else
12431 bfd_put_32 (output_bfd,
12432 sdyn->output_section->vma + sdyn->output_offset,
12433 sgot->contents);
12434 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12435 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
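/* The first GOT word now holds the address of the .dynamic section (or
   zero if there is none); the second and third words are left zero here
   and are conventionally filled in at run time by the dynamic linker
   with its own bookkeeping pointer and the address of its lazy
   resolution routine.  */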
12436 }
12437
12438 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12439 }
12440
12441 return TRUE;
12442 }
12443
12444 static void
12445 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12446 {
12447 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12448 struct elf32_arm_link_hash_table *globals;
12449
12450 i_ehdrp = elf_elfheader (abfd);
12451
12452 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12453 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12454 else
12455 i_ehdrp->e_ident[EI_OSABI] = 0;
12456 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12457
12458 if (link_info)
12459 {
12460 globals = elf32_arm_hash_table (link_info);
12461 if (globals->byteswap_code)
12462 i_ehdrp->e_flags |= EF_ARM_BE8;
12463 }
12464 }
12465
12466 static enum elf_reloc_type_class
12467 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12468 {
12469 switch ((int) ELF32_R_TYPE (rela->r_info))
12470 {
12471 case R_ARM_RELATIVE:
12472 return reloc_class_relative;
12473 case R_ARM_JUMP_SLOT:
12474 return reloc_class_plt;
12475 case R_ARM_COPY:
12476 return reloc_class_copy;
12477 default:
12478 return reloc_class_normal;
12479 }
12480 }
12481
12482 /* Set the right machine number for an Arm ELF file. */
12483
12484 static bfd_boolean
12485 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12486 {
12487 if (hdr->sh_type == SHT_NOTE)
12488 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12489
12490 return TRUE;
12491 }
12492
12493 static void
12494 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12495 {
12496 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12497 }
12498
12499 /* Return TRUE if this is an unwinding table entry. */
12500
12501 static bfd_boolean
12502 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12503 {
12504 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12505 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12506 }
12507
12508
12509 /* Set the type and flags for an ARM section. We do this by
12510 the section name, which is a hack, but ought to work. */
12511
12512 static bfd_boolean
12513 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12514 {
12515 const char * name;
12516
12517 name = bfd_get_section_name (abfd, sec);
12518
12519 if (is_arm_elf_unwind_section_name (abfd, name))
12520 {
12521 hdr->sh_type = SHT_ARM_EXIDX;
12522 hdr->sh_flags |= SHF_LINK_ORDER;
12523 }
12524 return TRUE;
12525 }
12526
12527 /* Handle an ARM specific section when reading an object file. This is
12528 called when bfd_section_from_shdr finds a section with an unknown
12529 type. */
12530
12531 static bfd_boolean
12532 elf32_arm_section_from_shdr (bfd *abfd,
12533 Elf_Internal_Shdr * hdr,
12534 const char *name,
12535 int shindex)
12536 {
12537 /* There ought to be a place to keep ELF backend specific flags, but
12538 at the moment there isn't one. We just keep track of the
12539 sections by their name, instead. Fortunately, the ABI gives
12540 names for all the ARM specific sections, so we will probably get
12541 away with this. */
12542 switch (hdr->sh_type)
12543 {
12544 case SHT_ARM_EXIDX:
12545 case SHT_ARM_PREEMPTMAP:
12546 case SHT_ARM_ATTRIBUTES:
12547 break;
12548
12549 default:
12550 return FALSE;
12551 }
12552
12553 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12554 return FALSE;
12555
12556 return TRUE;
12557 }
12558
12559 /* A structure used to record a list of sections, independently
12560 of the next and prev fields in the asection structure. */
12561 typedef struct section_list
12562 {
12563 asection * sec;
12564 struct section_list * next;
12565 struct section_list * prev;
12566 }
12567 section_list;
12568
12569 /* Unfortunately we need to keep a list of sections for which
12570 an _arm_elf_section_data structure has been allocated. This
12571 is because it is possible for functions like elf32_arm_write_section
12572 to be called on a section which has had an ELF section data structure
12573 allocated for it (and so the used_by_bfd field is valid) but
12574 for which the ARM extended version of this structure - the
12575 _arm_elf_section_data structure - has not been allocated. */
12576 static section_list * sections_with_arm_elf_section_data = NULL;
12577
12578 static void
12579 record_section_with_arm_elf_section_data (asection * sec)
12580 {
12581 struct section_list * entry;
12582
12583 entry = bfd_malloc (sizeof (* entry));
12584 if (entry == NULL)
12585 return;
12586 entry->sec = sec;
12587 entry->next = sections_with_arm_elf_section_data;
12588 entry->prev = NULL;
12589 if (entry->next != NULL)
12590 entry->next->prev = entry;
12591 sections_with_arm_elf_section_data = entry;
12592 }
12593
12594 static struct section_list *
12595 find_arm_elf_section_entry (asection * sec)
12596 {
12597 struct section_list * entry;
12598 static struct section_list * last_entry = NULL;
12599
12600 /* This is a short cut for the typical case where the sections are added
12601 to the sections_with_arm_elf_section_data list in forward order and
12602 then looked up here in backwards order. This makes a real difference
12603 to the ld-srec/sec64k.exp linker test. */
12604 entry = sections_with_arm_elf_section_data;
12605 if (last_entry != NULL)
12606 {
12607 if (last_entry->sec == sec)
12608 entry = last_entry;
12609 else if (last_entry->next != NULL
12610 && last_entry->next->sec == sec)
12611 entry = last_entry->next;
12612 }
12613
12614 for (; entry; entry = entry->next)
12615 if (entry->sec == sec)
12616 break;
12617
12618 if (entry)
12619 /* Record the entry prior to this one - it is the entry we are most
12620 likely to want to locate next time. Also this way if we have been
12621 called from unrecord_section_with_arm_elf_section_data() we will not
12622 be caching a pointer that is about to be freed. */
12623 last_entry = entry->prev;
12624
12625 return entry;
12626 }
12627
12628 static _arm_elf_section_data *
12629 get_arm_elf_section_data (asection * sec)
12630 {
12631 struct section_list * entry;
12632
12633 entry = find_arm_elf_section_entry (sec);
12634
12635 if (entry)
12636 return elf32_arm_section_data (entry->sec);
12637 else
12638 return NULL;
12639 }
12640
12641 static void
12642 unrecord_section_with_arm_elf_section_data (asection * sec)
12643 {
12644 struct section_list * entry;
12645
12646 entry = find_arm_elf_section_entry (sec);
12647
12648 if (entry)
12649 {
12650 if (entry->prev != NULL)
12651 entry->prev->next = entry->next;
12652 if (entry->next != NULL)
12653 entry->next->prev = entry->prev;
12654 if (entry == sections_with_arm_elf_section_data)
12655 sections_with_arm_elf_section_data = entry->next;
12656 free (entry);
12657 }
12658 }
12659
12660
12661 typedef struct
12662 {
12663 void *finfo;
12664 struct bfd_link_info *info;
12665 asection *sec;
12666 int sec_shndx;
12667 int (*func) (void *, const char *, Elf_Internal_Sym *,
12668 asection *, struct elf_link_hash_entry *);
12669 } output_arch_syminfo;
12670
12671 enum map_symbol_type
12672 {
12673 ARM_MAP_ARM,
12674 ARM_MAP_THUMB,
12675 ARM_MAP_DATA
12676 };
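/* These values correspond to the "$a", "$t" and "$d" mapping symbols
   defined by the ARM ELF specification, which mark the start of a run
   of ARM code, Thumb code or literal data within a section.  */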
12677
12678
12679 /* Output a single mapping symbol. */
12680
12681 static bfd_boolean
12682 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12683 enum map_symbol_type type,
12684 bfd_vma offset)
12685 {
12686 static const char *names[3] = {"$a", "$t", "$d"};
12687 struct elf32_arm_link_hash_table *htab;
12688 Elf_Internal_Sym sym;
12689
12690 htab = elf32_arm_hash_table (osi->info);
12691 sym.st_value = osi->sec->output_section->vma
12692 + osi->sec->output_offset
12693 + offset;
12694 sym.st_size = 0;
12695 sym.st_other = 0;
12696 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12697 sym.st_shndx = osi->sec_shndx;
12698 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12699 }
12700
12701
12702 /* Output mapping symbols for PLT entries associated with H. */
12703
12704 static bfd_boolean
12705 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12706 {
12707 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12708 struct elf32_arm_link_hash_table *htab;
12709 struct elf32_arm_link_hash_entry *eh;
12710 bfd_vma addr;
12711
12712 htab = elf32_arm_hash_table (osi->info);
12713
12714 if (h->root.type == bfd_link_hash_indirect)
12715 return TRUE;
12716
12717 if (h->root.type == bfd_link_hash_warning)
12718 /* When warning symbols are created, they **replace** the "real"
12719 entry in the hash table, thus we never get to see the real
12720 symbol in a hash traversal. So look at it now. */
12721 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12722
12723 if (h->plt.offset == (bfd_vma) -1)
12724 return TRUE;
12725
12726 eh = (struct elf32_arm_link_hash_entry *) h;
12727 addr = h->plt.offset;
12728 if (htab->symbian_p)
12729 {
12730 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12731 return FALSE;
12732 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12733 return FALSE;
12734 }
12735 else if (htab->vxworks_p)
12736 {
12737 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12738 return FALSE;
12739 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12740 return FALSE;
12741 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12742 return FALSE;
12743 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12744 return FALSE;
12745 }
12746 else
12747 {
12748 bfd_signed_vma thumb_refs;
12749
12750 thumb_refs = eh->plt_thumb_refcount;
12751 if (!htab->use_blx)
12752 thumb_refs += eh->plt_maybe_thumb_refcount;
12753
12754 if (thumb_refs > 0)
12755 {
12756 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12757 return FALSE;
12758 }
12759 #ifdef FOUR_WORD_PLT
12760 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12761 return FALSE;
12762 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12763 return FALSE;
12764 #else
12765 /* A three-word PLT with no Thumb thunk contains only ARM code, so we
12766 only need to output a mapping symbol for the first PLT entry and for
12767 entries with Thumb thunks. */
12768 if (thumb_refs > 0 || addr == 20)
12769 {
12770 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12771 return FALSE;
12772 }
12773 #endif
12774 }
12775
12776 return TRUE;
12777 }
12778
12779 /* Output a single local symbol for a generated stub. */
12780
12781 static bfd_boolean
12782 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12783 bfd_vma offset, bfd_vma size)
12784 {
12785 struct elf32_arm_link_hash_table *htab;
12786 Elf_Internal_Sym sym;
12787
12788 htab = elf32_arm_hash_table (osi->info);
12789 sym.st_value = osi->sec->output_section->vma
12790 + osi->sec->output_offset
12791 + offset;
12792 sym.st_size = size;
12793 sym.st_other = 0;
12794 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12795 sym.st_shndx = osi->sec_shndx;
12796 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12797 }
12798
12799 static bfd_boolean
12800 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12801 void * in_arg)
12802 {
12803 struct elf32_arm_stub_hash_entry *stub_entry;
12804 struct bfd_link_info *info;
12805 struct elf32_arm_link_hash_table *htab;
12806 asection *stub_sec;
12807 bfd_vma addr;
12808 char *stub_name;
12809 output_arch_syminfo *osi;
12810 const insn_sequence *template;
12811 enum stub_insn_type prev_type;
12812 int size;
12813 int i;
12814 enum map_symbol_type sym_type;
12815
12816 /* Massage our args to the form they really have. */
12817 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12818 osi = (output_arch_syminfo *) in_arg;
12819
12820 info = osi->info;
12821
12822 htab = elf32_arm_hash_table (info);
12823 stub_sec = stub_entry->stub_sec;
12824
12825 /* Ensure this stub is attached to the current section being
12826 processed. */
12827 if (stub_sec != osi->sec)
12828 return TRUE;
12829
12830 addr = (bfd_vma) stub_entry->stub_offset;
12831 stub_name = stub_entry->output_name;
12832
12833 template = stub_entry->stub_template;
12834 switch (template[0].type)
12835 {
12836 case ARM_TYPE:
12837 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12838 return FALSE;
12839 break;
12840 case THUMB16_TYPE:
12841 case THUMB32_TYPE:
12842 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12843 stub_entry->stub_size))
12844 return FALSE;
12845 break;
12846 default:
12847 BFD_FAIL ();
12848 return FALSE;
12849 }
12850
12851 prev_type = DATA_TYPE;
12852 size = 0;
12853 for (i = 0; i < stub_entry->stub_template_size; i++)
12854 {
12855 switch (template[i].type)
12856 {
12857 case ARM_TYPE:
12858 sym_type = ARM_MAP_ARM;
12859 break;
12860
12861 case THUMB16_TYPE:
12862 case THUMB32_TYPE:
12863 sym_type = ARM_MAP_THUMB;
12864 break;
12865
12866 case DATA_TYPE:
12867 sym_type = ARM_MAP_DATA;
12868 break;
12869
12870 default:
12871 BFD_FAIL ();
12872 return FALSE;
12873 }
12874
12875 if (template[i].type != prev_type)
12876 {
12877 prev_type = template[i].type;
12878 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12879 return FALSE;
12880 }
12881
12882 switch (template[i].type)
12883 {
12884 case ARM_TYPE:
12885 case THUMB32_TYPE:
12886 size += 4;
12887 break;
12888
12889 case THUMB16_TYPE:
12890 size += 2;
12891 break;
12892
12893 case DATA_TYPE:
12894 size += 4;
12895 break;
12896
12897 default:
12898 BFD_FAIL ();
12899 return FALSE;
12900 }
12901 }
12902
12903 return TRUE;
12904 }
12905
12906 /* Output mapping symbols for linker generated sections. */
12907
12908 static bfd_boolean
12909 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12910 struct bfd_link_info *info,
12911 void *finfo,
12912 int (*func) (void *, const char *,
12913 Elf_Internal_Sym *,
12914 asection *,
12915 struct elf_link_hash_entry *))
12916 {
12917 output_arch_syminfo osi;
12918 struct elf32_arm_link_hash_table *htab;
12919 bfd_vma offset;
12920 bfd_size_type size;
12921
12922 htab = elf32_arm_hash_table (info);
12923 check_use_blx (htab);
12924
12925 osi.finfo = finfo;
12926 osi.info = info;
12927 osi.func = func;
12928
12929 /* ARM->Thumb glue. */
12930 if (htab->arm_glue_size > 0)
12931 {
12932 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12933 ARM2THUMB_GLUE_SECTION_NAME);
12934
12935 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12936 (output_bfd, osi.sec->output_section);
12937 if (info->shared || htab->root.is_relocatable_executable
12938 || htab->pic_veneer)
12939 size = ARM2THUMB_PIC_GLUE_SIZE;
12940 else if (htab->use_blx)
12941 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12942 else
12943 size = ARM2THUMB_STATIC_GLUE_SIZE;
12944
12945 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12946 {
12947 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12948 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12949 }
12950 }
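
  /* Worked example, for illustration only (the 12-byte entry size below is an
     assumption, not taken from this file): each glue entry gets an ARM mapping
     symbol ($a) at its start and a data mapping symbol ($d) on its final word,
     which holds an address literal.  Assuming a glue entry size of 12 bytes,
     the loop above would emit:

       $a at offsets 0, 12, 24, ...
       $d at offsets 8, 20, 32, ...

     so disassemblers decode the code words as ARM and leave the literal
     alone.  */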
12951
12952 /* Thumb->ARM glue. */
12953 if (htab->thumb_glue_size > 0)
12954 {
12955 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12956 THUMB2ARM_GLUE_SECTION_NAME);
12957
12958 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12959 (output_bfd, osi.sec->output_section);
12960 size = THUMB2ARM_GLUE_SIZE;
12961
12962 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12963 {
12964 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
12965 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
12966 }
12967 }
12968
12969 /* ARMv4 BX veneers. */
12970 if (htab->bx_glue_size > 0)
12971 {
12972 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12973 ARM_BX_GLUE_SECTION_NAME);
12974
12975 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12976 (output_bfd, osi.sec->output_section);
12977
12978 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
12979 }
12980
12981   /* Long call stubs.  */
12982 if (htab->stub_bfd && htab->stub_bfd->sections)
12983 {
12984 asection* stub_sec;
12985
12986 for (stub_sec = htab->stub_bfd->sections;
12987 stub_sec != NULL;
12988 stub_sec = stub_sec->next)
12989 {
12990 /* Ignore non-stub sections. */
12991 if (!strstr (stub_sec->name, STUB_SUFFIX))
12992 continue;
12993
12994 osi.sec = stub_sec;
12995
12996 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12997 (output_bfd, osi.sec->output_section);
12998
12999 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13000 }
13001 }
13002
13003 /* Finally, output mapping symbols for the PLT. */
13004 if (!htab->splt || htab->splt->size == 0)
13005 return TRUE;
13006
13007 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13008 htab->splt->output_section);
13009 osi.sec = htab->splt;
13010 /* Output mapping symbols for the plt header. SymbianOS does not have a
13011 plt header. */
13012 if (htab->vxworks_p)
13013 {
13014 /* VxWorks shared libraries have no PLT header. */
13015 if (!info->shared)
13016 {
13017 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13018 return FALSE;
13019 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13020 return FALSE;
13021 }
13022 }
13023 else if (!htab->symbian_p)
13024 {
13025 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13026 return FALSE;
13027 #ifndef FOUR_WORD_PLT
13028 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13029 return FALSE;
13030 #endif
13031 }
13032
13033 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13034 return TRUE;
13035 }
13036
13037 /* Allocate target specific section data. */
13038
13039 static bfd_boolean
13040 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13041 {
13042 if (!sec->used_by_bfd)
13043 {
13044 _arm_elf_section_data *sdata;
13045 bfd_size_type amt = sizeof (*sdata);
13046
13047 sdata = bfd_zalloc (abfd, amt);
13048 if (sdata == NULL)
13049 return FALSE;
13050 sec->used_by_bfd = sdata;
13051 }
13052
13053 record_section_with_arm_elf_section_data (sec);
13054
13055 return _bfd_elf_new_section_hook (abfd, sec);
13056 }
13057
13058
13059 /* Used to order a list of mapping symbols by address. */
13060
13061 static int
13062 elf32_arm_compare_mapping (const void * a, const void * b)
13063 {
13064 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13065 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13066
13067 if (amap->vma > bmap->vma)
13068 return 1;
13069 else if (amap->vma < bmap->vma)
13070 return -1;
13071 else if (amap->type > bmap->type)
13072 /* Ensure results do not depend on the host qsort for objects with
13073 multiple mapping symbols at the same address by sorting on type
13074 after vma. */
13075 return 1;
13076 else if (amap->type < bmap->type)
13077 return -1;
13078 else
13079 return 0;
13080 }
13081
13082 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13083
13084 static unsigned long
13085 offset_prel31 (unsigned long addr, bfd_vma offset)
13086 {
13087 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13088 }
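
/* Worked example, for illustration only (the values are assumed, not taken
   from this file): a prel31 field keeps bit 31 for other purposes and stores
   a signed 31-bit offset in bits 0-30, so the addition must wrap within
   those 31 bits.  With ADDR == 0x7ffffffc (a field value of -4) and
   OFFSET == 8:

     offset_prel31 (0x7ffffffc, 8)
       == (0x7ffffffc & ~0x7ffffffful) | ((0x7ffffffc + 8) & 0x7ffffffful)
       == 0x00000000 | 0x00000004
       == 0x00000004

   Bit 31 of the original value is preserved and the low 31 bits wrap.  */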
13089
13090 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13091 relocations. */
13092
13093 static void
13094 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13095 {
13096 unsigned long first_word = bfd_get_32 (output_bfd, from);
13097 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13098
13099 /* High bit of first word is supposed to be zero. */
13100 if ((first_word & 0x80000000ul) == 0)
13101 first_word = offset_prel31 (first_word, offset);
13102
13103   /* If the high bit of the second word is clear, and the word is not 0x1
13104      (EXIDX_CANTUNWIND), it is a prel31 offset to an .ARM.extab entry.  */
13105 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13106 second_word = offset_prel31 (second_word, offset);
13107
13108 bfd_put_32 (output_bfd, first_word, to);
13109 bfd_put_32 (output_bfd, second_word, to + 4);
13110 }
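
/* Illustrative note (a sketch of the entry layout, not text from the
   original source): an .ARM.exidx entry is a pair of words.  The first word
   is always a prel31 offset to the function it describes, so it is adjusted
   above whenever its high bit is clear.  The second word is either 0x1
   (EXIDX_CANTUNWIND), an inline unwind encoding with the high bit set
   (left untouched), or a prel31 offset into .ARM.extab (adjusted like the
   first word).  */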
13111
13112 /* Data for make_branch_to_a8_stub(). */
13113
13114 struct a8_branch_to_stub_data {
13115 asection *writing_section;
13116 bfd_byte *contents;
13117 };
13118
13119
13120 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13121 places for a particular section. */
13122
13123 static bfd_boolean
13124 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13125 void *in_arg)
13126 {
13127 struct elf32_arm_stub_hash_entry *stub_entry;
13128 struct a8_branch_to_stub_data *data;
13129 bfd_byte *contents;
13130 unsigned long branch_insn;
13131 bfd_vma veneered_insn_loc, veneer_entry_loc;
13132 bfd_signed_vma branch_offset;
13133 bfd *abfd;
13134 unsigned int index;
13135
13136 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13137 data = (struct a8_branch_to_stub_data *) in_arg;
13138
13139 if (stub_entry->target_section != data->writing_section
13140 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13141 return TRUE;
13142
13143 contents = data->contents;
13144
13145 veneered_insn_loc = stub_entry->target_section->output_section->vma
13146 + stub_entry->target_section->output_offset
13147 + stub_entry->target_value;
13148
13149 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13150 + stub_entry->stub_sec->output_offset
13151 + stub_entry->stub_offset;
13152
13153 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13154 veneered_insn_loc &= ~3u;
13155
13156 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13157
13158 abfd = stub_entry->target_section->owner;
13159 index = stub_entry->target_value;
13160
13161 /* We attempt to avoid this condition by setting stubs_always_after_branch
13162 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13163 This check is just to be on the safe side... */
13164 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13165 {
13166 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13167 "allocated in unsafe location"), abfd);
13168 return FALSE;
13169 }
13170
13171 switch (stub_entry->stub_type)
13172 {
13173 case arm_stub_a8_veneer_b:
13174 case arm_stub_a8_veneer_b_cond:
13175 branch_insn = 0xf0009000;
13176 goto jump24;
13177
13178 case arm_stub_a8_veneer_blx:
13179 branch_insn = 0xf000e800;
13180 goto jump24;
13181
13182 case arm_stub_a8_veneer_bl:
13183 {
13184 unsigned int i1, j1, i2, j2, s;
13185
13186 branch_insn = 0xf000d000;
13187
13188 jump24:
13189 if (branch_offset < -16777216 || branch_offset > 16777214)
13190 {
13191 /* There's not much we can do apart from complain if this
13192 happens. */
13193 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13194 "of range (input file too large)"), abfd);
13195 return FALSE;
13196 }
13197
13198 /* i1 = not(j1 eor s), so:
13199 not i1 = j1 eor s
13200 j1 = (not i1) eor s. */
13201
13202 branch_insn |= (branch_offset >> 1) & 0x7ff;
13203 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13204 i2 = (branch_offset >> 22) & 1;
13205 i1 = (branch_offset >> 23) & 1;
13206 s = (branch_offset >> 24) & 1;
13207 j1 = (!i1) ^ s;
13208 j2 = (!i2) ^ s;
13209 branch_insn |= j2 << 11;
13210 branch_insn |= j1 << 13;
13211 branch_insn |= s << 26;
13212 }
13213 break;
13214
13215 default:
13216 BFD_FAIL ();
13217 return FALSE;
13218 }
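
  /* Worked example, for illustration only (the offset value is assumed, not
     taken from this file): the packing above follows the Thumb-2 wide-branch
     encoding, with imm11 in bits 0-10, J2 in bit 11, J1 in bit 13, imm10 in
     bits 16-25 and S in bit 26.  Assuming an arm_stub_a8_veneer_bl stub with
     branch_offset == 4:

       imm11 = (4 >> 1) & 0x7ff  = 2
       imm10 = (4 >> 12) & 0x3ff = 0
       i1 = i2 = s = 0, hence j1 = j2 = 1

     giving branch_insn == 0xf000f802, i.e. the halfwords 0xf000 0xf802,
     which decode as a BL with an immediate of +4.  */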
13219
13220 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13221 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13222
13223 return TRUE;
13224 }
13225
13226 /* Apply VFP11 fixes, .ARM.exidx edits, Cortex-A8 stub branches and code byteswapping.
13227    Return FALSE so the section is written out as normal (.ARM.exidx writes itself and returns TRUE).  */
13228
13229 static bfd_boolean
13230 elf32_arm_write_section (bfd *output_bfd,
13231 struct bfd_link_info *link_info,
13232 asection *sec,
13233 bfd_byte *contents)
13234 {
13235 unsigned int mapcount, errcount;
13236 _arm_elf_section_data *arm_data;
13237 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13238 elf32_arm_section_map *map;
13239 elf32_vfp11_erratum_list *errnode;
13240 bfd_vma ptr;
13241 bfd_vma end;
13242 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13243 bfd_byte tmp;
13244 unsigned int i;
13245
13246 /* If this section has not been allocated an _arm_elf_section_data
13247 structure then we cannot record anything. */
13248 arm_data = get_arm_elf_section_data (sec);
13249 if (arm_data == NULL)
13250 return FALSE;
13251
13252 mapcount = arm_data->mapcount;
13253 map = arm_data->map;
13254 errcount = arm_data->erratumcount;
13255
13256 if (errcount != 0)
13257 {
13258 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13259
13260 for (errnode = arm_data->erratumlist; errnode != 0;
13261 errnode = errnode->next)
13262 {
13263 bfd_vma index = errnode->vma - offset;
13264
13265 switch (errnode->type)
13266 {
13267 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13268 {
13269 bfd_vma branch_to_veneer;
13270 /* Original condition code of instruction, plus bit mask for
13271 ARM B instruction. */
13272 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13273 | 0x0a000000;
13274
13275 /* The instruction is before the label. */
13276 index -= 4;
13277
13278 /* Above offset included in -4 below. */
13279 branch_to_veneer = errnode->u.b.veneer->vma
13280 - errnode->vma - 4;
13281
13282 if ((signed) branch_to_veneer < -(1 << 25)
13283 || (signed) branch_to_veneer >= (1 << 25))
13284 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13285 "range"), output_bfd);
13286
13287 insn |= (branch_to_veneer >> 2) & 0xffffff;
13288 contents[endianflip ^ index] = insn & 0xff;
13289 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13290 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13291 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13292 }
13293 break;
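
	    /* Worked example, for illustration only (the values are assumed,
	       not taken from this file): 0x0a000000 is the opcode field of an
	       ARM "B" instruction, and OR-ing in the original instruction's
	       condition bits keeps the branch conditional on the same
	       condition.  Assuming an always-executed VFP insn (condition 0xe)
	       and branch_to_veneer == 0x100, the replacement word is:

	         0xe0000000 | 0x0a000000 | (0x100 >> 2) == 0xea000040

	       i.e. an unconditional branch whose 24-bit field holds the word
	       offset to the veneer.  */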
13294
13295 case VFP11_ERRATUM_ARM_VENEER:
13296 {
13297 bfd_vma branch_from_veneer;
13298 unsigned int insn;
13299
13300 /* Take size of veneer into account. */
13301 branch_from_veneer = errnode->u.v.branch->vma
13302 - errnode->vma - 12;
13303
13304 if ((signed) branch_from_veneer < -(1 << 25)
13305 || (signed) branch_from_veneer >= (1 << 25))
13306 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13307 "range"), output_bfd);
13308
13309 /* Original instruction. */
13310 insn = errnode->u.v.branch->u.b.vfp_insn;
13311 contents[endianflip ^ index] = insn & 0xff;
13312 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13313 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13314 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13315
13316 /* Branch back to insn after original insn. */
13317 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13318 contents[endianflip ^ (index + 4)] = insn & 0xff;
13319 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13320 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13321 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13322 }
13323 break;
13324
13325 default:
13326 abort ();
13327 }
13328 }
13329 }
13330
13331 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13332 {
13333 arm_unwind_table_edit *edit_node
13334 = arm_data->u.exidx.unwind_edit_list;
13335     /* Now, sec->size is the size of the section we will write.  The original
13336        size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13337        markers) was sec->rawsize.  (If we performed no edits, rawsize will be
13338        zero and we should use size instead.)  */
13339 bfd_byte *edited_contents = bfd_malloc (sec->size);
13340 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13341 unsigned int in_index, out_index;
13342 bfd_vma add_to_offsets = 0;
13343
13344 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13345 {
13346 if (edit_node)
13347 {
13348 unsigned int edit_index = edit_node->index;
13349
13350 if (in_index < edit_index && in_index * 8 < input_size)
13351 {
13352 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13353 contents + in_index * 8, add_to_offsets);
13354 out_index++;
13355 in_index++;
13356 }
13357 else if (in_index == edit_index
13358 || (in_index * 8 >= input_size
13359 && edit_index == UINT_MAX))
13360 {
13361 switch (edit_node->type)
13362 {
13363 case DELETE_EXIDX_ENTRY:
13364 in_index++;
13365 add_to_offsets += 8;
13366 break;
13367
13368 case INSERT_EXIDX_CANTUNWIND_AT_END:
13369 {
13370 asection *text_sec = edit_node->linked_section;
13371 bfd_vma text_offset = text_sec->output_section->vma
13372 + text_sec->output_offset
13373 + text_sec->size;
13374 bfd_vma exidx_offset = offset + out_index * 8;
13375 unsigned long prel31_offset;
13376
13377 /* Note: this is meant to be equivalent to an
13378 R_ARM_PREL31 relocation. These synthetic
13379 EXIDX_CANTUNWIND markers are not relocated by the
13380 usual BFD method. */
13381 prel31_offset = (text_offset - exidx_offset)
13382 & 0x7ffffffful;
13383
13384 /* First address we can't unwind. */
13385 bfd_put_32 (output_bfd, prel31_offset,
13386 &edited_contents[out_index * 8]);
13387
13388 /* Code for EXIDX_CANTUNWIND. */
13389 bfd_put_32 (output_bfd, 0x1,
13390 &edited_contents[out_index * 8 + 4]);
13391
13392 out_index++;
13393 add_to_offsets -= 8;
13394 }
13395 break;
13396 }
13397
13398 edit_node = edit_node->next;
13399 }
13400 }
13401 else
13402 {
13403 /* No more edits, copy remaining entries verbatim. */
13404 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13405 contents + in_index * 8, add_to_offsets);
13406 out_index++;
13407 in_index++;
13408 }
13409 }
13410
13411 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13412 bfd_set_section_contents (output_bfd, sec->output_section,
13413 edited_contents,
13414 (file_ptr) sec->output_offset, sec->size);
13415
13416 return TRUE;
13417 }
13418
13419 /* Fix code to point to Cortex-A8 erratum stubs. */
13420 if (globals->fix_cortex_a8)
13421 {
13422 struct a8_branch_to_stub_data data;
13423
13424 data.writing_section = sec;
13425 data.contents = contents;
13426
13427 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13428 &data);
13429 }
13430
13431 if (mapcount == 0)
13432 return FALSE;
13433
13434 if (globals->byteswap_code)
13435 {
13436 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13437
13438 ptr = map[0].vma;
13439 for (i = 0; i < mapcount; i++)
13440 {
13441 if (i == mapcount - 1)
13442 end = sec->size;
13443 else
13444 end = map[i + 1].vma;
13445
13446 switch (map[i].type)
13447 {
13448 case 'a':
13449 /* Byte swap code words. */
13450 while (ptr + 3 < end)
13451 {
13452 tmp = contents[ptr];
13453 contents[ptr] = contents[ptr + 3];
13454 contents[ptr + 3] = tmp;
13455 tmp = contents[ptr + 1];
13456 contents[ptr + 1] = contents[ptr + 2];
13457 contents[ptr + 2] = tmp;
13458 ptr += 4;
13459 }
13460 break;
13461
13462 case 't':
13463 /* Byte swap code halfwords. */
13464 while (ptr + 1 < end)
13465 {
13466 tmp = contents[ptr];
13467 contents[ptr] = contents[ptr + 1];
13468 contents[ptr + 1] = tmp;
13469 ptr += 2;
13470 }
13471 break;
13472
13473 case 'd':
13474 /* Leave data alone. */
13475 break;
13476 }
13477 ptr = end;
13478 }
13479 }
13480
13481 free (map);
13482 arm_data->mapcount = 0;
13483 arm_data->mapsize = 0;
13484 arm_data->map = NULL;
13485 unrecord_section_with_arm_elf_section_data (sec);
13486
13487 return FALSE;
13488 }
13489
13490 static void
13491 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13492 asection * sec,
13493 void * ignore ATTRIBUTE_UNUSED)
13494 {
13495 unrecord_section_with_arm_elf_section_data (sec);
13496 }
13497
13498 static bfd_boolean
13499 elf32_arm_close_and_cleanup (bfd * abfd)
13500 {
13501 if (abfd->sections)
13502 bfd_map_over_sections (abfd,
13503 unrecord_section_via_map_over_sections,
13504 NULL);
13505
13506 return _bfd_elf_close_and_cleanup (abfd);
13507 }
13508
13509 static bfd_boolean
13510 elf32_arm_bfd_free_cached_info (bfd * abfd)
13511 {
13512 if (abfd->sections)
13513 bfd_map_over_sections (abfd,
13514 unrecord_section_via_map_over_sections,
13515 NULL);
13516
13517 return _bfd_free_cached_info (abfd);
13518 }
13519
13520 /* Display STT_ARM_TFUNC symbols as functions. */
13521
13522 static void
13523 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13524 asymbol *asym)
13525 {
13526 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13527
13528 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13529 elfsym->symbol.flags |= BSF_FUNCTION;
13530 }
13531
13532
13533 /* Mangle thumb function symbols as we read them in. */
13534
13535 static bfd_boolean
13536 elf32_arm_swap_symbol_in (bfd * abfd,
13537 const void *psrc,
13538 const void *pshn,
13539 Elf_Internal_Sym *dst)
13540 {
13541 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13542 return FALSE;
13543
13544 /* New EABI objects mark thumb function symbols by setting the low bit of
13545 the address. Turn these into STT_ARM_TFUNC. */
13546 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13547 && (dst->st_value & 1))
13548 {
13549 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13550 dst->st_value &= ~(bfd_vma) 1;
13551 }
13552 return TRUE;
13553 }
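
/* Worked example, for illustration only (the address is assumed, not taken
   from this file): for a Thumb function at 0x8000, an EABI object records an
   STT_FUNC symbol whose st_value is 0x8001.  After the hook above, the
   internal symbol keeps its binding but has type STT_ARM_TFUNC and st_value
   0x8000.  elf32_arm_swap_symbol_out below applies the inverse mangling when
   the symbol table is written back out.  */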
13554
13555
13556 /* Mangle thumb function symbols as we write them out. */
13557
13558 static void
13559 elf32_arm_swap_symbol_out (bfd *abfd,
13560 const Elf_Internal_Sym *src,
13561 void *cdst,
13562 void *shndx)
13563 {
13564 Elf_Internal_Sym newsym;
13565
13566 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13567 of the address set, as per the new EABI. We do this unconditionally
13568 because objcopy does not set the elf header flags until after
13569 it writes out the symbol table. */
13570 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13571 {
13572 newsym = *src;
13573 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13574 if (newsym.st_shndx != SHN_UNDEF)
13575 {
13576 	  /* Do this only for defined symbols.  At link time, the static
13577 	     linker simulates the work of the dynamic linker by resolving
13578 	     symbols, and carries the thumbness of the symbols it finds over
13579 	     to the output symbol table.  The thumbness of an undefined
13580 	     symbol may well be different at runtime, however, so writing
13581 	     '1' for undefined symbols would be confusing for users and
13582 	     possibly for the dynamic linker itself.
13583 	  */
13584 newsym.st_value |= 1;
13585 }
13586
13587 src = &newsym;
13588 }
13589 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13590 }
13591
13592 /* Add the PT_ARM_EXIDX program header. */
13593
13594 static bfd_boolean
13595 elf32_arm_modify_segment_map (bfd *abfd,
13596 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13597 {
13598 struct elf_segment_map *m;
13599 asection *sec;
13600
13601 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13602 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13603 {
13604 /* If there is already a PT_ARM_EXIDX header, then we do not
13605 want to add another one. This situation arises when running
13606 "strip"; the input binary already has the header. */
13607 m = elf_tdata (abfd)->segment_map;
13608 while (m && m->p_type != PT_ARM_EXIDX)
13609 m = m->next;
13610 if (!m)
13611 {
13612 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13613 if (m == NULL)
13614 return FALSE;
13615 m->p_type = PT_ARM_EXIDX;
13616 m->count = 1;
13617 m->sections[0] = sec;
13618
13619 m->next = elf_tdata (abfd)->segment_map;
13620 elf_tdata (abfd)->segment_map = m;
13621 }
13622 }
13623
13624 return TRUE;
13625 }
13626
13627 /* We may add a PT_ARM_EXIDX program header. */
13628
13629 static int
13630 elf32_arm_additional_program_headers (bfd *abfd,
13631 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13632 {
13633 asection *sec;
13634
13635 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13636 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13637 return 1;
13638 else
13639 return 0;
13640 }
13641
13642 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13643
13644 static bfd_boolean
13645 elf32_arm_is_function_type (unsigned int type)
13646 {
13647 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13648 }
13649
13650 /* We use this to override swap_symbol_in and swap_symbol_out. */
13651 const struct elf_size_info elf32_arm_size_info =
13652 {
13653 sizeof (Elf32_External_Ehdr),
13654 sizeof (Elf32_External_Phdr),
13655 sizeof (Elf32_External_Shdr),
13656 sizeof (Elf32_External_Rel),
13657 sizeof (Elf32_External_Rela),
13658 sizeof (Elf32_External_Sym),
13659 sizeof (Elf32_External_Dyn),
13660 sizeof (Elf_External_Note),
13661 4,
13662 1,
13663 32, 2,
13664 ELFCLASS32, EV_CURRENT,
13665 bfd_elf32_write_out_phdrs,
13666 bfd_elf32_write_shdrs_and_ehdr,
13667 bfd_elf32_checksum_contents,
13668 bfd_elf32_write_relocs,
13669 elf32_arm_swap_symbol_in,
13670 elf32_arm_swap_symbol_out,
13671 bfd_elf32_slurp_reloc_table,
13672 bfd_elf32_slurp_symbol_table,
13673 bfd_elf32_swap_dyn_in,
13674 bfd_elf32_swap_dyn_out,
13675 bfd_elf32_swap_reloc_in,
13676 bfd_elf32_swap_reloc_out,
13677 bfd_elf32_swap_reloca_in,
13678 bfd_elf32_swap_reloca_out
13679 };
13680
13681 #define ELF_ARCH bfd_arch_arm
13682 #define ELF_MACHINE_CODE EM_ARM
13683 #ifdef __QNXTARGET__
13684 #define ELF_MAXPAGESIZE 0x1000
13685 #else
13686 #define ELF_MAXPAGESIZE 0x8000
13687 #endif
13688 #define ELF_MINPAGESIZE 0x1000
13689 #define ELF_COMMONPAGESIZE 0x1000
13690
13691 #define bfd_elf32_mkobject elf32_arm_mkobject
13692
13693 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13694 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13695 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13696 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13697 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13698 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13699 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13700 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13701 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13702 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13703 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13704 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13705 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13706 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13707 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13708
13709 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13710 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13711 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13712 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13713 #define elf_backend_check_relocs elf32_arm_check_relocs
13714 #define elf_backend_relocate_section elf32_arm_relocate_section
13715 #define elf_backend_write_section elf32_arm_write_section
13716 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13717 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13718 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13719 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13720 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13721 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13722 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13723 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13724 #define elf_backend_object_p elf32_arm_object_p
13725 #define elf_backend_section_flags elf32_arm_section_flags
13726 #define elf_backend_fake_sections elf32_arm_fake_sections
13727 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13728 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13729 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13730 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13731 #define elf_backend_size_info elf32_arm_size_info
13732 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13733 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13734 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13735 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13736 #define elf_backend_is_function_type elf32_arm_is_function_type
13737
13738 #define elf_backend_can_refcount 1
13739 #define elf_backend_can_gc_sections 1
13740 #define elf_backend_plt_readonly 1
13741 #define elf_backend_want_got_plt 1
13742 #define elf_backend_want_plt_sym 0
13743 #define elf_backend_may_use_rel_p 1
13744 #define elf_backend_may_use_rela_p 0
13745 #define elf_backend_default_use_rela_p 0
13746
13747 #define elf_backend_got_header_size 12
13748
13749 #undef elf_backend_obj_attrs_vendor
13750 #define elf_backend_obj_attrs_vendor "aeabi"
13751 #undef elf_backend_obj_attrs_section
13752 #define elf_backend_obj_attrs_section ".ARM.attributes"
13753 #undef elf_backend_obj_attrs_arg_type
13754 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13755 #undef elf_backend_obj_attrs_section_type
13756 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13757 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13758
13759 #include "elf32-target.h"
13760
13761 /* VxWorks Targets. */
13762
13763 #undef TARGET_LITTLE_SYM
13764 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13765 #undef TARGET_LITTLE_NAME
13766 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13767 #undef TARGET_BIG_SYM
13768 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13769 #undef TARGET_BIG_NAME
13770 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13771
13772 /* Like elf32_arm_link_hash_table_create -- but overrides
13773 appropriately for VxWorks. */
13774
13775 static struct bfd_link_hash_table *
13776 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13777 {
13778 struct bfd_link_hash_table *ret;
13779
13780 ret = elf32_arm_link_hash_table_create (abfd);
13781 if (ret)
13782 {
13783 struct elf32_arm_link_hash_table *htab
13784 = (struct elf32_arm_link_hash_table *) ret;
13785 htab->use_rel = 0;
13786 htab->vxworks_p = 1;
13787 }
13788 return ret;
13789 }
13790
13791 static void
13792 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13793 {
13794 elf32_arm_final_write_processing (abfd, linker);
13795 elf_vxworks_final_write_processing (abfd, linker);
13796 }
13797
13798 #undef elf32_bed
13799 #define elf32_bed elf32_arm_vxworks_bed
13800
13801 #undef bfd_elf32_bfd_link_hash_table_create
13802 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13803 #undef elf_backend_add_symbol_hook
13804 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13805 #undef elf_backend_final_write_processing
13806 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13807 #undef elf_backend_emit_relocs
13808 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13809
13810 #undef elf_backend_may_use_rel_p
13811 #define elf_backend_may_use_rel_p 0
13812 #undef elf_backend_may_use_rela_p
13813 #define elf_backend_may_use_rela_p 1
13814 #undef elf_backend_default_use_rela_p
13815 #define elf_backend_default_use_rela_p 1
13816 #undef elf_backend_want_plt_sym
13817 #define elf_backend_want_plt_sym 1
13818 #undef ELF_MAXPAGESIZE
13819 #define ELF_MAXPAGESIZE 0x1000
13820
13821 #include "elf32-target.h"
13822
13823
13824 /* Symbian OS Targets. */
13825
13826 #undef TARGET_LITTLE_SYM
13827 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13828 #undef TARGET_LITTLE_NAME
13829 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13830 #undef TARGET_BIG_SYM
13831 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13832 #undef TARGET_BIG_NAME
13833 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13834
13835 /* Like elf32_arm_link_hash_table_create -- but overrides
13836 appropriately for Symbian OS. */
13837
13838 static struct bfd_link_hash_table *
13839 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13840 {
13841 struct bfd_link_hash_table *ret;
13842
13843 ret = elf32_arm_link_hash_table_create (abfd);
13844 if (ret)
13845 {
13846 struct elf32_arm_link_hash_table *htab
13847 = (struct elf32_arm_link_hash_table *)ret;
13848 /* There is no PLT header for Symbian OS. */
13849 htab->plt_header_size = 0;
13850 /* The PLT entries are each one instruction and one word. */
13851 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13852 htab->symbian_p = 1;
13853 /* Symbian uses armv5t or above, so use_blx is always true. */
13854 htab->use_blx = 1;
13855 htab->root.is_relocatable_executable = 1;
13856 }
13857 return ret;
13858 }
13859
13860 static const struct bfd_elf_special_section
13861 elf32_arm_symbian_special_sections[] =
13862 {
13863 /* In a BPABI executable, the dynamic linking sections do not go in
13864 the loadable read-only segment. The post-linker may wish to
13865 refer to these sections, but they are not part of the final
13866 program image. */
13867 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13868 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13869 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13870 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13871 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13872 /* These sections do not need to be writable as the SymbianOS
13873 postlinker will arrange things so that no dynamic relocation is
13874 required. */
13875 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13876 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13877 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13878 { NULL, 0, 0, 0, 0 }
13879 };
13880
13881 static void
13882 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13883 struct bfd_link_info *link_info)
13884 {
13885 /* BPABI objects are never loaded directly by an OS kernel; they are
13886 processed by a postlinker first, into an OS-specific format. If
13887 the D_PAGED bit is set on the file, BFD will align segments on
13888 page boundaries, so that an OS can directly map the file. With
13889 BPABI objects, that just results in wasted space. In addition,
13890 because we clear the D_PAGED bit, map_sections_to_segments will
13891 recognize that the program headers should not be mapped into any
13892 loadable segment. */
13893 abfd->flags &= ~D_PAGED;
13894 elf32_arm_begin_write_processing (abfd, link_info);
13895 }
13896
13897 static bfd_boolean
13898 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13899 struct bfd_link_info *info)
13900 {
13901 struct elf_segment_map *m;
13902 asection *dynsec;
13903
13904 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13905 segment. However, because the .dynamic section is not marked
13906 with SEC_LOAD, the generic ELF code will not create such a
13907 segment. */
13908 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13909 if (dynsec)
13910 {
13911 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13912 if (m->p_type == PT_DYNAMIC)
13913 break;
13914
13915 if (m == NULL)
13916 {
13917 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13918 m->next = elf_tdata (abfd)->segment_map;
13919 elf_tdata (abfd)->segment_map = m;
13920 }
13921 }
13922
13923 /* Also call the generic arm routine. */
13924 return elf32_arm_modify_segment_map (abfd, info);
13925 }
13926
13927 /* Return address for Ith PLT stub in section PLT, for relocation REL
13928 or (bfd_vma) -1 if it should not be included. */
13929
13930 static bfd_vma
13931 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13932 const arelent *rel ATTRIBUTE_UNUSED)
13933 {
13934 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13935 }
13936
13937
13938 #undef elf32_bed
13939 #define elf32_bed elf32_arm_symbian_bed
13940
13941 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13942 will process them and then discard them. */
13943 #undef ELF_DYNAMIC_SEC_FLAGS
13944 #define ELF_DYNAMIC_SEC_FLAGS \
13945 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
13946
13947 #undef elf_backend_add_symbol_hook
13948 #undef elf_backend_emit_relocs
13949
13950 #undef bfd_elf32_bfd_link_hash_table_create
13951 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
13952 #undef elf_backend_special_sections
13953 #define elf_backend_special_sections elf32_arm_symbian_special_sections
13954 #undef elf_backend_begin_write_processing
13955 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
13956 #undef elf_backend_final_write_processing
13957 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13958
13959 #undef elf_backend_modify_segment_map
13960 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
13961
13962 /* There is no .got section for BPABI objects, and hence no header. */
13963 #undef elf_backend_got_header_size
13964 #define elf_backend_got_header_size 0
13965
13966 /* Similarly, there is no .got.plt section. */
13967 #undef elf_backend_want_got_plt
13968 #define elf_backend_want_got_plt 0
13969
13970 #undef elf_backend_plt_sym_val
13971 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
13972
13973 #undef elf_backend_may_use_rel_p
13974 #define elf_backend_may_use_rel_p 1
13975 #undef elf_backend_may_use_rela_p
13976 #define elf_backend_may_use_rela_p 0
13977 #undef elf_backend_default_use_rela_p
13978 #define elf_backend_default_use_rela_p 0
13979 #undef elf_backend_want_plt_sym
13980 #define elf_backend_want_plt_sym 0
13981 #undef ELF_MAXPAGESIZE
13982 #define ELF_MAXPAGESIZE 0x8000
13983
13984 #include "elf32-target.h"