1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
57
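/* Illustrative usage sketch for the four macros above (not part of
   this file's logic; "dynobj" and "htab" stand for any dynamic-object
   bfd and struct elf32_arm_link_hash_table pointer in scope):

     asection *srel
       = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".text"));
     bfd_size_type entsize = RELOC_SIZE (htab);

   With use_rel set this selects ".rel.text" and
   sizeof (Elf32_External_Rel); otherwise ".rela.text" and
   sizeof (Elf32_External_Rela).  */
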
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* Standard 32 bit pc-relative reloc. */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* PC-relative LDR offset (formerly R_ARM_PC13) - R_ARM_LDR_PC_G0 in AAELF. */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
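
/* Worked example of the encoding described by these HOWTO fields
   (illustrative only): the R_ARM_PC24 entry above has rightshift 2 and
   a 24-bit signed bitfield, so a byte offset of 0x1000 from the place
   being relocated is stored as 0x1000 >> 2 = 0x400 in the low 24 bits
   of the branch instruction, giving a reach of roughly +/-32MB.  */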
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-248 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
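
/* Example of the mapping performed above (illustrative only):
   elf32_arm_howto_from_type (R_ARM_ABS32) returns the table 1 entry
   named "R_ARM_ABS32", while a type in the unallocated gap between the
   two tables (for instance R_ARM_ME_TOO, noted as obsolete above)
   returns NULL.  */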
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
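
/* Illustrative example of the r_info decoding used above: for a 32-bit
   ELF relocation whose r_info field is 0x00001502 (a hypothetical
   value), ELF32_R_SYM (0x00001502) is 0x15 (the symbol table index)
   and ELF32_R_TYPE (0x00001502) is 0x02, i.e. R_ARM_ABS32, whose HOWTO
   is then found by elf32_arm_howto_from_type.  */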
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in the elf32_arm_howto tables above. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862   /* Note that for some reason a spurious space is tacked
1863      onto the end of the args in some implementations (at least
1864      one, anyway), so strip it off if it is present.  */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896    Given a function name and its type, the stub can be found.  The
1897    name can be changed.  The only requirement is that the %s be present.  */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
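
/* Editorial illustration -- not part of the original source.  The *_ENTRY_NAME
   and STUB_ENTRY_NAME strings above are printf-style templates, so the name of
   a veneer for a function "foo" would be built roughly as sketched below.  The
   helper name and the buffer sizing are hypothetical.  */
#if 0
static char *
example_make_veneer_name (const char *func_name)
{
  /* Room for the template with "%s" replaced by FUNC_NAME, plus a NUL;
     this over-allocates by the two bytes of "%s", which is harmless.  */
  bfd_size_type len = strlen (STUB_ENTRY_NAME) + strlen (func_name) + 1;
  char *name = bfd_malloc (len);

  if (name != NULL)
    sprintf (name, STUB_ENTRY_NAME, func_name);   /* e.g. "__foo_veneer".  */
  return name;
}
#endif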
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
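
/* Editorial illustration -- not part of the original source.  For the
   three-word PLT entry above (the non-FOUR_WORD_PLT layout), the linker later
   folds the displacement from the PLT entry to its GOT slot into the three
   instructions: an 8-bit rotated immediate in each "add" and a 12-bit offset
   in the "ldr".  A hypothetical sketch of that split:  */
#if 0
static void
example_encode_plt_displacement (bfd_vma got_displacement, bfd_vma insns[3])
{
  insns[0] = elf32_arm_plt_entry[0] | ((got_displacement & 0x0ff00000) >> 20);
  insns[1] = elf32_arm_plt_entry[1] | ((got_displacement & 0x000ff000) >> 12);
  insns[2] = elf32_arm_plt_entry[2] | (got_displacement & 0x00000fff);
}
#endif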
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
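
/* Editorial illustration -- not part of the original source.  The limits above
   are applied to a signed (destination - call site) displacement; a Thumb-2
   branch is in range when a test of roughly the following (hypothetical) shape
   holds, mirroring the checks made later in arm_type_of_stub.  */
#if 0
static bfd_boolean
example_thumb2_branch_in_range (bfd_vma location, bfd_vma destination)
{
  bfd_signed_vma branch_offset = (bfd_signed_vma) (destination - location);

  return (branch_offset <= THM2_MAX_FWD_BRANCH_OFFSET
	  && branch_offset >= THM2_MAX_BWD_BRANCH_OFFSET);
}
#endif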
2020
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack: a Thumb conditional branch, into which the proper
2031    condition is inserted by arm_build_one_stub().  */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
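
/* Editorial note (illustration only): the -8 addend on the ARM branch above
   compensates for the ARM convention that the PC reads as the address of the
   branch plus 8, so the encoded immediate works out to roughly
   (X - (P + 8)) >> 2 for a branch at address P reaching destination X.  */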
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116    blx to reach the stub if necessary.  We cannot add into pc;
2117    doing so is not guaranteed to switch mode (the behaviour differs
2118    between ARMv6 and ARMv7).  */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC.  */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143   DATA_WORD(0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231   /* Note the first a8_veneer type.  */
2232 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2233 };
2234 #undef DEF_STUB
2235
2236 typedef struct
2237 {
2238 const insn_sequence* template;
2239 int template_size;
2240 } stub_def;
2241
2242 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2243 static const stub_def stub_definitions[] = {
2244 {NULL, 0},
2245 DEF_STUBS
2246 };
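
/* Editorial note (illustration only): DEF_STUBS is an X-macro expanded twice
   with different definitions of DEF_STUB -- once to build the
   elf32_arm_stub_type enumeration and once to build this table -- so a single
   line such as DEF_STUB(long_branch_any_any) contributes both the enumerator
   arm_stub_long_branch_any_any and the matching table entry
   {elf32_arm_stub_long_branch_any_any,
    ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)}, keeping the enum values
   and template-table indices in step by construction (the arm_stub_none slot
   is covered by the leading {NULL, 0} entry).  */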
2247
2248 struct elf32_arm_stub_hash_entry
2249 {
2250 /* Base hash table entry structure. */
2251 struct bfd_hash_entry root;
2252
2253 /* The stub section. */
2254 asection *stub_sec;
2255
2256 /* Offset within stub_sec of the beginning of this stub. */
2257 bfd_vma stub_offset;
2258
2259 /* Given the symbol's value and its section we can determine its final
2260 value when building the stubs (so the stub knows where to jump). */
2261 bfd_vma target_value;
2262 asection *target_section;
2263
2264 /* Offset to apply to relocation referencing target_value. */
2265 bfd_vma target_addend;
2266
2267 /* The instruction which caused this stub to be generated (only valid for
2268 Cortex-A8 erratum workaround stubs at present). */
2269 unsigned long orig_insn;
2270
2271 /* The stub type. */
2272 enum elf32_arm_stub_type stub_type;
2273 /* Its encoding size in bytes. */
2274 int stub_size;
2275 /* Its template. */
2276 const insn_sequence *stub_template;
2277 /* The size of the template (number of entries). */
2278 int stub_template_size;
2279
2280 /* The symbol table entry, if any, that this was derived from. */
2281 struct elf32_arm_link_hash_entry *h;
2282
2283 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2284 unsigned char st_type;
2285
2286 /* Where this stub is being called from, or, in the case of combined
2287 stub sections, the first input section in the group. */
2288 asection *id_sec;
2289
2290 /* The name for the local symbol at the start of this stub. The
2291      stub name in the hash table has to be unique; this one does not,
2292      so it can be friendlier.  */
2293 char *output_name;
2294 };
2295
2296 /* Used to build a map of a section. This is required for mixed-endian
2297 code/data. */
2298
2299 typedef struct elf32_elf_section_map
2300 {
2301 bfd_vma vma;
2302 char type;
2303 }
2304 elf32_arm_section_map;
2305
2306 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2307
2308 typedef enum
2309 {
2310 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2311 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2312 VFP11_ERRATUM_ARM_VENEER,
2313 VFP11_ERRATUM_THUMB_VENEER
2314 }
2315 elf32_vfp11_erratum_type;
2316
2317 typedef struct elf32_vfp11_erratum_list
2318 {
2319 struct elf32_vfp11_erratum_list *next;
2320 bfd_vma vma;
2321 union
2322 {
2323 struct
2324 {
2325 struct elf32_vfp11_erratum_list *veneer;
2326 unsigned int vfp_insn;
2327 } b;
2328 struct
2329 {
2330 struct elf32_vfp11_erratum_list *branch;
2331 unsigned int id;
2332 } v;
2333 } u;
2334 elf32_vfp11_erratum_type type;
2335 }
2336 elf32_vfp11_erratum_list;
2337
2338 typedef enum
2339 {
2340 DELETE_EXIDX_ENTRY,
2341 INSERT_EXIDX_CANTUNWIND_AT_END
2342 }
2343 arm_unwind_edit_type;
2344
2345 /* A (sorted) list of edits to apply to an unwind table. */
2346 typedef struct arm_unwind_table_edit
2347 {
2348 arm_unwind_edit_type type;
2349 /* Note: we sometimes want to insert an unwind entry corresponding to a
2350 section different from the one we're currently writing out, so record the
2351 (text) section this edit relates to here. */
2352 asection *linked_section;
2353 unsigned int index;
2354 struct arm_unwind_table_edit *next;
2355 }
2356 arm_unwind_table_edit;
2357
2358 typedef struct _arm_elf_section_data
2359 {
2360 /* Information about mapping symbols. */
2361 struct bfd_elf_section_data elf;
2362 unsigned int mapcount;
2363 unsigned int mapsize;
2364 elf32_arm_section_map *map;
2365 /* Information about CPU errata. */
2366 unsigned int erratumcount;
2367 elf32_vfp11_erratum_list *erratumlist;
2368 /* Information about unwind tables. */
2369 union
2370 {
2371 /* Unwind info attached to a text section. */
2372 struct
2373 {
2374 asection *arm_exidx_sec;
2375 } text;
2376
2377 /* Unwind info attached to an .ARM.exidx section. */
2378 struct
2379 {
2380 arm_unwind_table_edit *unwind_edit_list;
2381 arm_unwind_table_edit *unwind_edit_tail;
2382 } exidx;
2383 } u;
2384 }
2385 _arm_elf_section_data;
2386
2387 #define elf32_arm_section_data(sec) \
2388 ((_arm_elf_section_data *) elf_section_data (sec))
2389
2390 /* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
2391    These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2392    so they may be created multiple times: we use an array of these entries
2393    whilst relaxing, which we can refresh easily, then create stubs for each
2394    potentially erratum-triggering instruction once we've settled on a solution.  */
2395
2396 struct a8_erratum_fix {
2397 bfd *input_bfd;
2398 asection *section;
2399 bfd_vma offset;
2400 bfd_vma addend;
2401 unsigned long orig_insn;
2402 char *stub_name;
2403 enum elf32_arm_stub_type stub_type;
2404 };
2405
2406 /* A table of relocs applied to branches which might trigger the
2407    Cortex-A8 erratum.  */
2408
2409 struct a8_erratum_reloc {
2410 bfd_vma from;
2411 bfd_vma destination;
2412 unsigned int r_type;
2413 unsigned char st_type;
2414 const char *sym_name;
2415 bfd_boolean non_a8_stub;
2416 };
2417
2418 /* The size of the thread control block. */
2419 #define TCB_SIZE 8
2420
2421 struct elf_arm_obj_tdata
2422 {
2423 struct elf_obj_tdata root;
2424
2425 /* tls_type for each local got entry. */
2426 char *local_got_tls_type;
2427
2428 /* Zero to warn when linking objects with incompatible enum sizes. */
2429 int no_enum_size_warning;
2430
2431 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2432 int no_wchar_size_warning;
2433 };
2434
2435 #define elf_arm_tdata(bfd) \
2436 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2437
2438 #define elf32_arm_local_got_tls_type(bfd) \
2439 (elf_arm_tdata (bfd)->local_got_tls_type)
2440
2441 #define is_arm_elf(bfd) \
2442 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2443 && elf_tdata (bfd) != NULL \
2444 && elf_object_id (bfd) == ARM_ELF_TDATA)
2445
2446 static bfd_boolean
2447 elf32_arm_mkobject (bfd *abfd)
2448 {
2449 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2450 ARM_ELF_TDATA);
2451 }
2452
2453 /* The ARM linker needs to keep track of the number of relocs that it
2454 decides to copy in check_relocs for each symbol. This is so that
2455 it can discard PC relative relocs if it doesn't need them when
2456 linking with -Bsymbolic. We store the information in a field
2457 extending the regular ELF linker hash table. */
2458
2459 /* This structure keeps track of the number of relocs we have copied
2460 for a given symbol. */
2461 struct elf32_arm_relocs_copied
2462 {
2463 /* Next section. */
2464 struct elf32_arm_relocs_copied * next;
2465 /* A section in dynobj. */
2466 asection * section;
2467 /* Number of relocs copied in this section. */
2468 bfd_size_type count;
2469 /* Number of PC-relative relocs copied in this section. */
2470 bfd_size_type pc_count;
2471 };
2472
2473 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2474
2475 /* Arm ELF linker hash entry. */
2476 struct elf32_arm_link_hash_entry
2477 {
2478 struct elf_link_hash_entry root;
2479
2480 /* Number of PC relative relocs copied for this symbol. */
2481 struct elf32_arm_relocs_copied * relocs_copied;
2482
2483 /* We reference count Thumb references to a PLT entry separately,
2484 so that we can emit the Thumb trampoline only if needed. */
2485 bfd_signed_vma plt_thumb_refcount;
2486
2487 /* Some references from Thumb code may be eliminated by BL->BLX
2488 conversion, so record them separately. */
2489 bfd_signed_vma plt_maybe_thumb_refcount;
2490
2491 /* Since PLT entries have variable size if the Thumb prologue is
2492 used, we need to record the index into .got.plt instead of
2493 recomputing it from the PLT offset. */
2494 bfd_signed_vma plt_got_offset;
2495
2496 #define GOT_UNKNOWN 0
2497 #define GOT_NORMAL 1
2498 #define GOT_TLS_GD 2
2499 #define GOT_TLS_IE 4
2500 unsigned char tls_type;
2501
2502 /* The symbol marking the real symbol location for exported thumb
2503 symbols with Arm stubs. */
2504 struct elf_link_hash_entry *export_glue;
2505
2506 /* A pointer to the most recently used stub hash entry against this
2507 symbol. */
2508 struct elf32_arm_stub_hash_entry *stub_cache;
2509 };
2510
2511 /* Traverse an arm ELF linker hash table. */
2512 #define elf32_arm_link_hash_traverse(table, func, info) \
2513 (elf_link_hash_traverse \
2514 (&(table)->root, \
2515 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 (info)))
2517
2518 /* Get the ARM elf linker hash table from a link_info structure. */
2519 #define elf32_arm_hash_table(info) \
2520 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2521
2522 #define arm_stub_hash_lookup(table, string, create, copy) \
2523 ((struct elf32_arm_stub_hash_entry *) \
2524 bfd_hash_lookup ((table), (string), (create), (copy)))
2525
2526 /* ARM ELF linker hash table. */
2527 struct elf32_arm_link_hash_table
2528 {
2529 /* The main hash table. */
2530 struct elf_link_hash_table root;
2531
2532 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2533 bfd_size_type thumb_glue_size;
2534
2535 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2536 bfd_size_type arm_glue_size;
2537
2538   /* The size in bytes of the section containing the ARMv4 BX veneers. */
2539 bfd_size_type bx_glue_size;
2540
2541   /* Offsets of ARMv4 BX veneers.  Bit 1 is set if a veneer is present, and
2542      bit 0 is set once the veneer has been populated.  */
2543 bfd_vma bx_glue_offset[15];
2544
2545 /* The size in bytes of the section containing glue for VFP11 erratum
2546 veneers. */
2547 bfd_size_type vfp11_erratum_glue_size;
2548
2549 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2550 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2551 elf32_arm_write_section(). */
2552 struct a8_erratum_fix *a8_erratum_fixes;
2553 unsigned int num_a8_erratum_fixes;
2554
2555 /* An arbitrary input BFD chosen to hold the glue sections. */
2556 bfd * bfd_of_glue_owner;
2557
2558 /* Nonzero to output a BE8 image. */
2559 int byteswap_code;
2560
2561 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2562 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2563 int target1_is_rel;
2564
2565 /* The relocation to use for R_ARM_TARGET2 relocations. */
2566 int target2_reloc;
2567
2568 /* 0 = Ignore R_ARM_V4BX.
2569 1 = Convert BX to MOV PC.
2570      2 = Generate v4 interworking stubs.  */
2571 int fix_v4bx;
2572
2573 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2574 int fix_cortex_a8;
2575
2576 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2577 int use_blx;
2578
2579 /* What sort of code sequences we should look for which may trigger the
2580 VFP11 denorm erratum. */
2581 bfd_arm_vfp11_fix vfp11_fix;
2582
2583 /* Global counter for the number of fixes we have emitted. */
2584 int num_vfp11_fixes;
2585
2586 /* Nonzero to force PIC branch veneers. */
2587 int pic_veneer;
2588
2589 /* The number of bytes in the initial entry in the PLT. */
2590 bfd_size_type plt_header_size;
2591
2592   /* The number of bytes in the subsequent PLT entries. */
2593 bfd_size_type plt_entry_size;
2594
2595 /* True if the target system is VxWorks. */
2596 int vxworks_p;
2597
2598 /* True if the target system is Symbian OS. */
2599 int symbian_p;
2600
2601 /* True if the target uses REL relocations. */
2602 int use_rel;
2603
2604 /* Short-cuts to get to dynamic linker sections. */
2605 asection *sgot;
2606 asection *sgotplt;
2607 asection *srelgot;
2608 asection *splt;
2609 asection *srelplt;
2610 asection *sdynbss;
2611 asection *srelbss;
2612
2613 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2614 asection *srelplt2;
2615
2616 /* Data for R_ARM_TLS_LDM32 relocations. */
2617 union
2618 {
2619 bfd_signed_vma refcount;
2620 bfd_vma offset;
2621 } tls_ldm_got;
2622
2623 /* Small local sym cache. */
2624 struct sym_cache sym_cache;
2625
2626 /* For convenience in allocate_dynrelocs. */
2627 bfd * obfd;
2628
2629 /* The stub hash table. */
2630 struct bfd_hash_table stub_hash_table;
2631
2632 /* Linker stub bfd. */
2633 bfd *stub_bfd;
2634
2635 /* Linker call-backs. */
2636 asection * (*add_stub_section) (const char *, asection *);
2637 void (*layout_sections_again) (void);
2638
2639 /* Array to keep track of which stub sections have been created, and
2640 information on stub grouping. */
2641 struct map_stub
2642 {
2643 /* This is the section to which stubs in the group will be
2644 attached. */
2645 asection *link_sec;
2646 /* The stub section. */
2647 asection *stub_sec;
2648 } *stub_group;
2649
2650 /* Assorted information used by elf32_arm_size_stubs. */
2651 unsigned int bfd_count;
2652 int top_index;
2653 asection **input_list;
2654 };
2655
2656 /* Create an entry in an ARM ELF linker hash table. */
2657
2658 static struct bfd_hash_entry *
2659 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2660 struct bfd_hash_table * table,
2661 const char * string)
2662 {
2663 struct elf32_arm_link_hash_entry * ret =
2664 (struct elf32_arm_link_hash_entry *) entry;
2665
2666 /* Allocate the structure if it has not already been allocated by a
2667 subclass. */
2668 if (ret == NULL)
2669 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2670 if (ret == NULL)
2671 return (struct bfd_hash_entry *) ret;
2672
2673 /* Call the allocation method of the superclass. */
2674 ret = ((struct elf32_arm_link_hash_entry *)
2675 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2676 table, string));
2677 if (ret != NULL)
2678 {
2679 ret->relocs_copied = NULL;
2680 ret->tls_type = GOT_UNKNOWN;
2681 ret->plt_thumb_refcount = 0;
2682 ret->plt_maybe_thumb_refcount = 0;
2683 ret->plt_got_offset = -1;
2684 ret->export_glue = NULL;
2685
2686 ret->stub_cache = NULL;
2687 }
2688
2689 return (struct bfd_hash_entry *) ret;
2690 }
2691
2692 /* Initialize an entry in the stub hash table. */
2693
2694 static struct bfd_hash_entry *
2695 stub_hash_newfunc (struct bfd_hash_entry *entry,
2696 struct bfd_hash_table *table,
2697 const char *string)
2698 {
2699 /* Allocate the structure if it has not already been allocated by a
2700 subclass. */
2701 if (entry == NULL)
2702 {
2703 entry = bfd_hash_allocate (table,
2704 sizeof (struct elf32_arm_stub_hash_entry));
2705 if (entry == NULL)
2706 return entry;
2707 }
2708
2709 /* Call the allocation method of the superclass. */
2710 entry = bfd_hash_newfunc (entry, table, string);
2711 if (entry != NULL)
2712 {
2713 struct elf32_arm_stub_hash_entry *eh;
2714
2715 /* Initialize the local fields. */
2716 eh = (struct elf32_arm_stub_hash_entry *) entry;
2717 eh->stub_sec = NULL;
2718 eh->stub_offset = 0;
2719 eh->target_value = 0;
2720 eh->target_section = NULL;
2721 eh->target_addend = 0;
2722 eh->orig_insn = 0;
2723 eh->stub_type = arm_stub_none;
2724 eh->stub_size = 0;
2725 eh->stub_template = NULL;
2726 eh->stub_template_size = 0;
2727 eh->h = NULL;
2728 eh->id_sec = NULL;
2729 eh->output_name = NULL;
2730 }
2731
2732 return entry;
2733 }
2734
2735 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2736 shortcuts to them in our hash table. */
2737
2738 static bfd_boolean
2739 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2740 {
2741 struct elf32_arm_link_hash_table *htab;
2742
2743 htab = elf32_arm_hash_table (info);
2744   /* BPABI objects never have a GOT or associated sections.  */
2745 if (htab->symbian_p)
2746 return TRUE;
2747
2748 if (! _bfd_elf_create_got_section (dynobj, info))
2749 return FALSE;
2750
2751 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2752 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2753 if (!htab->sgot || !htab->sgotplt)
2754 abort ();
2755
2756 htab->srelgot = bfd_get_section_by_name (dynobj,
2757 RELOC_SECTION (htab, ".got"));
2758 if (htab->srelgot == NULL)
2759 return FALSE;
2760 return TRUE;
2761 }
2762
2763 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2764 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2765 hash table. */
2766
2767 static bfd_boolean
2768 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2769 {
2770 struct elf32_arm_link_hash_table *htab;
2771
2772 htab = elf32_arm_hash_table (info);
2773 if (!htab->sgot && !create_got_section (dynobj, info))
2774 return FALSE;
2775
2776 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2777 return FALSE;
2778
2779 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2780 htab->srelplt = bfd_get_section_by_name (dynobj,
2781 RELOC_SECTION (htab, ".plt"));
2782 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2783 if (!info->shared)
2784 htab->srelbss = bfd_get_section_by_name (dynobj,
2785 RELOC_SECTION (htab, ".bss"));
2786
2787 if (htab->vxworks_p)
2788 {
2789 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2790 return FALSE;
2791
2792 if (info->shared)
2793 {
2794 htab->plt_header_size = 0;
2795 htab->plt_entry_size
2796 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2797 }
2798 else
2799 {
2800 htab->plt_header_size
2801 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2802 htab->plt_entry_size
2803 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2804 }
2805 }
2806
2807 if (!htab->splt
2808 || !htab->srelplt
2809 || !htab->sdynbss
2810 || (!info->shared && !htab->srelbss))
2811 abort ();
2812
2813 return TRUE;
2814 }
2815
2816 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2817
2818 static void
2819 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2820 struct elf_link_hash_entry *dir,
2821 struct elf_link_hash_entry *ind)
2822 {
2823 struct elf32_arm_link_hash_entry *edir, *eind;
2824
2825 edir = (struct elf32_arm_link_hash_entry *) dir;
2826 eind = (struct elf32_arm_link_hash_entry *) ind;
2827
2828 if (eind->relocs_copied != NULL)
2829 {
2830 if (edir->relocs_copied != NULL)
2831 {
2832 struct elf32_arm_relocs_copied **pp;
2833 struct elf32_arm_relocs_copied *p;
2834
2835 /* Add reloc counts against the indirect sym to the direct sym
2836 list. Merge any entries against the same section. */
2837 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2838 {
2839 struct elf32_arm_relocs_copied *q;
2840
2841 for (q = edir->relocs_copied; q != NULL; q = q->next)
2842 if (q->section == p->section)
2843 {
2844 q->pc_count += p->pc_count;
2845 q->count += p->count;
2846 *pp = p->next;
2847 break;
2848 }
2849 if (q == NULL)
2850 pp = &p->next;
2851 }
2852 *pp = edir->relocs_copied;
2853 }
2854
2855 edir->relocs_copied = eind->relocs_copied;
2856 eind->relocs_copied = NULL;
2857 }
2858
2859 if (ind->root.type == bfd_link_hash_indirect)
2860 {
2861 /* Copy over PLT info. */
2862 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2863 eind->plt_thumb_refcount = 0;
2864 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2865 eind->plt_maybe_thumb_refcount = 0;
2866
2867 if (dir->got.refcount <= 0)
2868 {
2869 edir->tls_type = eind->tls_type;
2870 eind->tls_type = GOT_UNKNOWN;
2871 }
2872 }
2873
2874 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2875 }
2876
2877 /* Create an ARM elf linker hash table. */
2878
2879 static struct bfd_link_hash_table *
2880 elf32_arm_link_hash_table_create (bfd *abfd)
2881 {
2882 struct elf32_arm_link_hash_table *ret;
2883 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2884
2885 ret = bfd_malloc (amt);
2886 if (ret == NULL)
2887 return NULL;
2888
2889 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2890 elf32_arm_link_hash_newfunc,
2891 sizeof (struct elf32_arm_link_hash_entry)))
2892 {
2893 free (ret);
2894 return NULL;
2895 }
2896
2897 ret->sgot = NULL;
2898 ret->sgotplt = NULL;
2899 ret->srelgot = NULL;
2900 ret->splt = NULL;
2901 ret->srelplt = NULL;
2902 ret->sdynbss = NULL;
2903 ret->srelbss = NULL;
2904 ret->srelplt2 = NULL;
2905 ret->thumb_glue_size = 0;
2906 ret->arm_glue_size = 0;
2907 ret->bx_glue_size = 0;
2908 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2909 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2910 ret->vfp11_erratum_glue_size = 0;
2911 ret->num_vfp11_fixes = 0;
2912 ret->fix_cortex_a8 = 0;
2913 ret->bfd_of_glue_owner = NULL;
2914 ret->byteswap_code = 0;
2915 ret->target1_is_rel = 0;
2916 ret->target2_reloc = R_ARM_NONE;
2917 #ifdef FOUR_WORD_PLT
2918 ret->plt_header_size = 16;
2919 ret->plt_entry_size = 16;
2920 #else
2921 ret->plt_header_size = 20;
2922 ret->plt_entry_size = 12;
2923 #endif
2924 ret->fix_v4bx = 0;
2925 ret->use_blx = 0;
2926 ret->vxworks_p = 0;
2927 ret->symbian_p = 0;
2928 ret->use_rel = 1;
2929 ret->sym_cache.abfd = NULL;
2930 ret->obfd = abfd;
2931 ret->tls_ldm_got.refcount = 0;
2932 ret->stub_bfd = NULL;
2933 ret->add_stub_section = NULL;
2934 ret->layout_sections_again = NULL;
2935 ret->stub_group = NULL;
2936 ret->bfd_count = 0;
2937 ret->top_index = 0;
2938 ret->input_list = NULL;
2939
2940 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2941 sizeof (struct elf32_arm_stub_hash_entry)))
2942 {
2943 free (ret);
2944 return NULL;
2945 }
2946
2947 return &ret->root.root;
2948 }
2949
2950 /* Free the derived linker hash table. */
2951
2952 static void
2953 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2954 {
2955 struct elf32_arm_link_hash_table *ret
2956 = (struct elf32_arm_link_hash_table *) hash;
2957
2958 bfd_hash_table_free (&ret->stub_hash_table);
2959 _bfd_generic_link_hash_table_free (hash);
2960 }
2961
2962 /* Determine if we're dealing with a Thumb-only architecture. */
2963
2964 static bfd_boolean
2965 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2966 {
2967 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2968 Tag_CPU_arch);
2969 int profile;
2970
2971 if (arch != TAG_CPU_ARCH_V7)
2972 return FALSE;
2973
2974 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2975 Tag_CPU_arch_profile);
2976
2977 return profile == 'M';
2978 }
2979
2980 /* Determine if we're dealing with a Thumb-2 object. */
2981
2982 static bfd_boolean
2983 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2984 {
2985 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch);
2987 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2988 }
2989
2990 static bfd_boolean
2991 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2992 {
2993 switch (stub_type)
2994 {
2995 case arm_stub_long_branch_thumb_only:
2996 case arm_stub_long_branch_v4t_thumb_arm:
2997 case arm_stub_short_branch_v4t_thumb_arm:
2998 case arm_stub_long_branch_v4t_thumb_arm_pic:
2999 case arm_stub_long_branch_thumb_only_pic:
3000 return TRUE;
3001 case arm_stub_none:
3002 BFD_FAIL ();
3003 return FALSE;
3004 break;
3005 default:
3006 return FALSE;
3007 }
3008 }
3009
3010 /* Determine the type of stub needed, if any, for a call. */
3011
3012 static enum elf32_arm_stub_type
3013 arm_type_of_stub (struct bfd_link_info *info,
3014 asection *input_sec,
3015 const Elf_Internal_Rela *rel,
3016 unsigned char st_type,
3017 struct elf32_arm_link_hash_entry *hash,
3018 bfd_vma destination,
3019 asection *sym_sec,
3020 bfd *input_bfd,
3021 const char *name)
3022 {
3023 bfd_vma location;
3024 bfd_signed_vma branch_offset;
3025 unsigned int r_type;
3026 struct elf32_arm_link_hash_table * globals;
3027 int thumb2;
3028 int thumb_only;
3029 enum elf32_arm_stub_type stub_type = arm_stub_none;
3030 int use_plt = 0;
3031
3032   /* If the destination is of type STT_SECTION we do not know its
3033      actual type, so give up. */
3034 if (st_type == STT_SECTION)
3035 return stub_type;
3036
3037 globals = elf32_arm_hash_table (info);
3038
3039 thumb_only = using_thumb_only (globals);
3040
3041 thumb2 = using_thumb2 (globals);
3042
3043 /* Determine where the call point is. */
3044 location = (input_sec->output_offset
3045 + input_sec->output_section->vma
3046 + rel->r_offset);
3047
3048   branch_offset = (bfd_signed_vma) (destination - location);
3049
3050 r_type = ELF32_R_TYPE (rel->r_info);
3051
3052 /* Keep a simpler condition, for the sake of clarity. */
3053 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3054 {
3055 use_plt = 1;
3056 /* Note when dealing with PLT entries: the main PLT stub is in
3057 ARM mode, so if the branch is in Thumb mode, another
3058 Thumb->ARM stub will be inserted later just before the ARM
3059 PLT stub. We don't take this extra distance into account
3060 here, because if a long branch stub is needed, we'll add a
3061 Thumb->Arm one and branch directly to the ARM PLT entry
3062 because it avoids spreading offset corrections in several
3063 places. */
3064 }
3065
3066 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3067 {
3068 /* Handle cases where:
3069 - this call goes too far (different Thumb/Thumb2 max
3070 distance)
3071 - it's a Thumb->Arm call and blx is not available, or it's a
3072 Thumb->Arm branch (not bl). A stub is needed in this case,
3073 but only if this call is not through a PLT entry. Indeed,
3074 PLT stubs handle mode switching already.
3075 */
3076 if ((!thumb2
3077 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3078 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3079 || (thumb2
3080 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3081 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3082 || ((st_type != STT_ARM_TFUNC)
3083 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3084 || (r_type == R_ARM_THM_JUMP24))
3085 && !use_plt))
3086 {
3087 if (st_type == STT_ARM_TFUNC)
3088 {
3089 /* Thumb to thumb. */
3090 if (!thumb_only)
3091 {
3092 stub_type = (info->shared | globals->pic_veneer)
3093 /* PIC stubs. */
3094 ? ((globals->use_blx
3095 			    && (r_type == R_ARM_THM_CALL))
3096 /* V5T and above. Stub starts with ARM code, so
3097 we must be able to switch mode before
3098 reaching it, which is only possible for 'bl'
3099 (ie R_ARM_THM_CALL relocation). */
3100 ? arm_stub_long_branch_any_thumb_pic
3101 /* On V4T, use Thumb code only. */
3102 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3103
3104 /* non-PIC stubs. */
3105 : ((globals->use_blx
3106 			   && (r_type == R_ARM_THM_CALL))
3107 /* V5T and above. */
3108 ? arm_stub_long_branch_any_any
3109 /* V4T. */
3110 : arm_stub_long_branch_v4t_thumb_thumb);
3111 }
3112 else
3113 {
3114 stub_type = (info->shared | globals->pic_veneer)
3115 /* PIC stub. */
3116 ? arm_stub_long_branch_thumb_only_pic
3117 /* non-PIC stub. */
3118 : arm_stub_long_branch_thumb_only;
3119 }
3120 }
3121 else
3122 {
3123 /* Thumb to arm. */
3124 if (sym_sec != NULL
3125 && sym_sec->owner != NULL
3126 && !INTERWORK_FLAG (sym_sec->owner))
3127 {
3128 (*_bfd_error_handler)
3129 (_("%B(%s): warning: interworking not enabled.\n"
3130 " first occurrence: %B: Thumb call to ARM"),
3131 sym_sec->owner, input_bfd, name);
3132 }
3133
3134 stub_type = (info->shared | globals->pic_veneer)
3135 /* PIC stubs. */
3136 ? ((globals->use_blx
3137 			 && (r_type == R_ARM_THM_CALL))
3138 /* V5T and above. */
3139 ? arm_stub_long_branch_any_arm_pic
3140 /* V4T PIC stub. */
3141 : arm_stub_long_branch_v4t_thumb_arm_pic)
3142
3143 /* non-PIC stubs. */
3144 : ((globals->use_blx
3145 			 && (r_type == R_ARM_THM_CALL))
3146 /* V5T and above. */
3147 ? arm_stub_long_branch_any_any
3148 /* V4T. */
3149 : arm_stub_long_branch_v4t_thumb_arm);
3150
3151 /* Handle v4t short branches. */
3152 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3153 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3154 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3155 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3156 }
3157 }
3158 }
3159 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3160 {
3161 if (st_type == STT_ARM_TFUNC)
3162 {
3163 /* Arm to thumb. */
3164
3165 if (sym_sec != NULL
3166 && sym_sec->owner != NULL
3167 && !INTERWORK_FLAG (sym_sec->owner))
3168 {
3169 (*_bfd_error_handler)
3170 (_("%B(%s): warning: interworking not enabled.\n"
3171 " first occurrence: %B: ARM call to Thumb"),
3172 sym_sec->owner, input_bfd, name);
3173 }
3174
3175 	  /* We have an extra 2 bytes of reach because of
3176 	     the mode change (bit 24 (H) of the BLX encoding).  */
3177 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3178 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3179 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3180 || (r_type == R_ARM_JUMP24)
3181 || (r_type == R_ARM_PLT32))
3182 {
3183 stub_type = (info->shared | globals->pic_veneer)
3184 /* PIC stubs. */
3185 ? ((globals->use_blx)
3186 /* V5T and above. */
3187 ? arm_stub_long_branch_any_thumb_pic
3188 /* V4T stub. */
3189 : arm_stub_long_branch_v4t_arm_thumb_pic)
3190
3191 /* non-PIC stubs. */
3192 : ((globals->use_blx)
3193 /* V5T and above. */
3194 ? arm_stub_long_branch_any_any
3195 /* V4T. */
3196 : arm_stub_long_branch_v4t_arm_thumb);
3197 }
3198 }
3199 else
3200 {
3201 /* Arm to arm. */
3202 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3203 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3204 {
3205 stub_type = (info->shared | globals->pic_veneer)
3206 /* PIC stubs. */
3207 ? arm_stub_long_branch_any_arm_pic
3208 /* non-PIC stubs. */
3209 : arm_stub_long_branch_any_any;
3210 }
3211 }
3212 }
3213
3214 return stub_type;
3215 }
3216
3217 /* Build a name for an entry in the stub hash table. */
3218
3219 static char *
3220 elf32_arm_stub_name (const asection *input_section,
3221 const asection *sym_sec,
3222 const struct elf32_arm_link_hash_entry *hash,
3223 const Elf_Internal_Rela *rel)
3224 {
3225 char *stub_name;
3226 bfd_size_type len;
3227
3228 if (hash)
3229 {
3230 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3231 stub_name = bfd_malloc (len);
3232 if (stub_name != NULL)
3233 sprintf (stub_name, "%08x_%s+%x",
3234 input_section->id & 0xffffffff,
3235 hash->root.root.root.string,
3236 (int) rel->r_addend & 0xffffffff);
3237 }
3238 else
3239 {
3240 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3241 stub_name = bfd_malloc (len);
3242 if (stub_name != NULL)
3243 sprintf (stub_name, "%08x_%x:%x+%x",
3244 input_section->id & 0xffffffff,
3245 sym_sec->id & 0xffffffff,
3246 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3247 (int) rel->r_addend & 0xffffffff);
3248 }
3249
3250 return stub_name;
3251 }
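
/* Editorial note (illustration only): the formats above give stub names such
   as "0000002a_printf+0" for a call against a global symbol, or
   "0000002a_1f:3+0" for a call against a local symbol; the section ids,
   symbol name and addend shown here are made up purely for illustration.  */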
3252
3253 /* Look up an entry in the stub hash. Stub entries are cached because
3254 creating the stub name takes a bit of time. */
3255
3256 static struct elf32_arm_stub_hash_entry *
3257 elf32_arm_get_stub_entry (const asection *input_section,
3258 const asection *sym_sec,
3259 struct elf_link_hash_entry *hash,
3260 const Elf_Internal_Rela *rel,
3261 struct elf32_arm_link_hash_table *htab)
3262 {
3263 struct elf32_arm_stub_hash_entry *stub_entry;
3264 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3265 const asection *id_sec;
3266
3267 if ((input_section->flags & SEC_CODE) == 0)
3268 return NULL;
3269
3270 /* If this input section is part of a group of sections sharing one
3271 stub section, then use the id of the first section in the group.
3272 Stub names need to include a section id, as there may well be
3273      more than one stub used to reach, say, printf, and we need to
3274 distinguish between them. */
3275 id_sec = htab->stub_group[input_section->id].link_sec;
3276
3277 if (h != NULL && h->stub_cache != NULL
3278 && h->stub_cache->h == h
3279 && h->stub_cache->id_sec == id_sec)
3280 {
3281 stub_entry = h->stub_cache;
3282 }
3283 else
3284 {
3285 char *stub_name;
3286
3287 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3288 if (stub_name == NULL)
3289 return NULL;
3290
3291 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3292 stub_name, FALSE, FALSE);
3293 if (h != NULL)
3294 h->stub_cache = stub_entry;
3295
3296 free (stub_name);
3297 }
3298
3299 return stub_entry;
3300 }
3301
3302 /* Find or create a stub section. Returns a pointer to the stub section, and
3303 the section to which the stub section will be attached (in *LINK_SEC_P).
3304 LINK_SEC_P may be NULL. */
3305
3306 static asection *
3307 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3308 struct elf32_arm_link_hash_table *htab)
3309 {
3310 asection *link_sec;
3311 asection *stub_sec;
3312
3313 link_sec = htab->stub_group[section->id].link_sec;
3314 stub_sec = htab->stub_group[section->id].stub_sec;
3315 if (stub_sec == NULL)
3316 {
3317 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3318 if (stub_sec == NULL)
3319 {
3320 size_t namelen;
3321 bfd_size_type len;
3322 char *s_name;
3323
3324 namelen = strlen (link_sec->name);
3325 len = namelen + sizeof (STUB_SUFFIX);
3326 s_name = bfd_alloc (htab->stub_bfd, len);
3327 if (s_name == NULL)
3328 return NULL;
3329
3330 memcpy (s_name, link_sec->name, namelen);
3331 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3332 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3333 if (stub_sec == NULL)
3334 return NULL;
3335 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3336 }
3337 htab->stub_group[section->id].stub_sec = stub_sec;
3338 }
3339
3340 if (link_sec_p)
3341 *link_sec_p = link_sec;
3342
3343 return stub_sec;
3344 }
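
/* Editorial note (illustration only): the name built above is simply the
   linked section's name with STUB_SUFFIX appended, so stubs grouped with a
   ".text" section end up in a section named ".text.stub".  */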
3345
3346 /* Add a new stub entry to the stub hash. Not all fields of the new
3347 stub entry are initialised. */
3348
3349 static struct elf32_arm_stub_hash_entry *
3350 elf32_arm_add_stub (const char *stub_name,
3351 asection *section,
3352 struct elf32_arm_link_hash_table *htab)
3353 {
3354 asection *link_sec;
3355 asection *stub_sec;
3356 struct elf32_arm_stub_hash_entry *stub_entry;
3357
3358 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3359 if (stub_sec == NULL)
3360 return NULL;
3361
3362 /* Enter this entry into the linker stub hash table. */
3363 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3364 TRUE, FALSE);
3365 if (stub_entry == NULL)
3366 {
3367       (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3368 section->owner,
3369 stub_name);
3370 return NULL;
3371 }
3372
3373 stub_entry->stub_sec = stub_sec;
3374 stub_entry->stub_offset = 0;
3375 stub_entry->id_sec = link_sec;
3376
3377 return stub_entry;
3378 }
3379
3380 /* Store an Arm insn into an output section not processed by
3381 elf32_arm_write_section. */
3382
3383 static void
3384 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3385 bfd * output_bfd, bfd_vma val, void * ptr)
3386 {
3387 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3388 bfd_putl32 (val, ptr);
3389 else
3390 bfd_putb32 (val, ptr);
3391 }
3392
3393 /* Store a 16-bit Thumb insn into an output section not processed by
3394 elf32_arm_write_section. */
3395
3396 static void
3397 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3398 bfd * output_bfd, bfd_vma val, void * ptr)
3399 {
3400 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3401 bfd_putl16 (val, ptr);
3402 else
3403 bfd_putb16 (val, ptr);
3404 }
3405
3406 static bfd_reloc_status_type elf32_arm_final_link_relocate
3407 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3408 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3409 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3410
3411 static bfd_boolean
3412 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3413 void * in_arg)
3414 {
3415 #define MAXRELOCS 2
3416 struct elf32_arm_stub_hash_entry *stub_entry;
3417 struct bfd_link_info *info;
3418 struct elf32_arm_link_hash_table *htab;
3419 asection *stub_sec;
3420 bfd *stub_bfd;
3421 bfd_vma stub_addr;
3422 bfd_byte *loc;
3423 bfd_vma sym_value;
3424 int template_size;
3425 int size;
3426 const insn_sequence *template;
3427 int i;
3428 struct elf32_arm_link_hash_table * globals;
3429 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3430 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3431 int nrelocs = 0;
3432
3433 /* Massage our args to the form they really have. */
3434 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3435 info = (struct bfd_link_info *) in_arg;
3436
3437 globals = elf32_arm_hash_table (info);
3438
3439 htab = elf32_arm_hash_table (info);
3440 stub_sec = stub_entry->stub_sec;
3441
3442 if ((htab->fix_cortex_a8 < 0)
3443 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3444 /* We have to do the a8 fixes last, as they are less aligned than
3445 the other veneers. */
3446 return TRUE;
3447
3448 /* Make a note of the offset within the stubs for this entry. */
3449 stub_entry->stub_offset = stub_sec->size;
3450 loc = stub_sec->contents + stub_entry->stub_offset;
3451
3452 stub_bfd = stub_sec->owner;
3453
3454 /* This is the address of the start of the stub. */
3455 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3456 + stub_entry->stub_offset;
3457
3458 /* This is the address of the stub destination. */
3459 sym_value = (stub_entry->target_value
3460 + stub_entry->target_section->output_offset
3461 + stub_entry->target_section->output_section->vma);
3462
3463 template = stub_entry->stub_template;
3464 template_size = stub_entry->stub_template_size;
3465
3466 size = 0;
3467 for (i = 0; i < template_size; i++)
3468 {
3469 switch (template[i].type)
3470 {
3471 case THUMB16_TYPE:
3472 {
3473 bfd_vma data = template[i].data;
3474 if (template[i].reloc_addend != 0)
3475 {
3476 /* We've borrowed the reloc_addend field to mean we should
3477 insert a condition code into this (Thumb-1 branch)
3478 instruction. See THUMB16_BCOND_INSN. */
3479 BFD_ASSERT ((data & 0xff00) == 0xd000);
3480 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3481 }
3482 put_thumb_insn (globals, stub_bfd, data, loc + size);
3483 size += 2;
3484 }
3485 break;
3486
3487 case THUMB32_TYPE:
3488 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3489 loc + size);
3490 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3491 loc + size + 2);
3492 if (template[i].r_type != R_ARM_NONE)
3493 {
3494 stub_reloc_idx[nrelocs] = i;
3495 stub_reloc_offset[nrelocs++] = size;
3496 }
3497 size += 4;
3498 break;
3499
3500 case ARM_TYPE:
3501 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3502 /* Handle cases where the target is encoded within the
3503 instruction. */
3504 if (template[i].r_type == R_ARM_JUMP24)
3505 {
3506 stub_reloc_idx[nrelocs] = i;
3507 stub_reloc_offset[nrelocs++] = size;
3508 }
3509 size += 4;
3510 break;
3511
3512 case DATA_TYPE:
3513 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3514 stub_reloc_idx[nrelocs] = i;
3515 stub_reloc_offset[nrelocs++] = size;
3516 size += 4;
3517 break;
3518
3519 default:
3520 BFD_FAIL ();
3521 return FALSE;
3522 }
3523 }
3524
3525 stub_sec->size += size;
3526
3527 /* Stub size has already been computed in arm_size_one_stub. Check
3528 consistency. */
3529 BFD_ASSERT (size == stub_entry->stub_size);
3530
3531 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3532 if (stub_entry->st_type == STT_ARM_TFUNC)
3533 sym_value |= 1;
3534
3535 /* Assume there is at least one and at most MAXRELOCS entries to relocate
3536 in each stub. */
3537 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3538
3539 for (i = 0; i < nrelocs; i++)
3540 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3541 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3542 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3543 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3544 {
3545 Elf_Internal_Rela rel;
3546 bfd_boolean unresolved_reloc;
3547 char *error_message;
3548 int sym_flags
3549 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3550 ? STT_ARM_TFUNC : 0;
3551 bfd_vma points_to = sym_value + stub_entry->target_addend;
3552
3553 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3554 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3555 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3556
3557 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3558 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3559 template should refer back to the instruction after the original
3560 branch. */
3561 points_to = sym_value;
3562
3563 /* There may be unintended consequences if this is not true. */
3564 BFD_ASSERT (stub_entry->h == NULL);
3565
3566 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3567 properly. We should probably use this function unconditionally,
3568 rather than only for certain relocations listed in the enclosing
3569 conditional, for the sake of consistency. */
3570 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3571 (template[stub_reloc_idx[i]].r_type),
3572 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3573 points_to, info, stub_entry->target_section, "", sym_flags,
3574 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3575 &error_message);
3576 }
3577 else
3578 {
3579 _bfd_final_link_relocate (elf32_arm_howto_from_type
3580 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3581 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3582 sym_value + stub_entry->target_addend,
3583 template[stub_reloc_idx[i]].reloc_addend);
3584 }
3585
3586 return TRUE;
3587 #undef MAXRELOCS
3588 }
3589
3590 /* Calculate the template, template size and instruction size for a stub.
3591 Return value is the instruction size. */
3592
3593 static unsigned int
3594 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3595 const insn_sequence **stub_template,
3596 int *stub_template_size)
3597 {
3598 const insn_sequence *template = NULL;
3599 int template_size = 0, i;
3600 unsigned int size;
3601
3602 template = stub_definitions[stub_type].template;
3603 template_size = stub_definitions[stub_type].template_size;
3604
3605 size = 0;
3606 for (i = 0; i < template_size; i++)
3607 {
3608 switch (template[i].type)
3609 {
3610 case THUMB16_TYPE:
3611 size += 2;
3612 break;
3613
3614 case ARM_TYPE:
3615 case THUMB32_TYPE:
3616 case DATA_TYPE:
3617 size += 4;
3618 break;
3619
3620 default:
3621 BFD_FAIL ();
3622 return FALSE;
3623 }
3624 }
3625
3626 if (stub_template)
3627 *stub_template = template;
3628
3629 if (stub_template_size)
3630 *stub_template_size = template_size;
3631
3632 return size;
3633 }
3634
3635 /* As above, but don't actually build the stub. Just bump offset so
3636 we know stub section sizes. */
3637
3638 static bfd_boolean
3639 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3640 void * in_arg)
3641 {
3642 struct elf32_arm_stub_hash_entry *stub_entry;
3643 struct elf32_arm_link_hash_table *htab;
3644 const insn_sequence *template;
3645 int template_size, size;
3646
3647 /* Massage our args to the form they really have. */
3648 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3649 htab = (struct elf32_arm_link_hash_table *) in_arg;
3650
3651 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3652 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3653
3654 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3655 &template_size);
3656
3657 stub_entry->stub_size = size;
3658 stub_entry->stub_template = template;
3659 stub_entry->stub_template_size = template_size;
3660
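  /* Pad each stub out to a multiple of 8 bytes, so that the stubs which
     follow it in the stub section stay 8-byte aligned.  */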
3661 size = (size + 7) & ~7;
3662 stub_entry->stub_sec->size += size;
3663
3664 return TRUE;
3665 }
3666
3667 /* External entry points for sizing and building linker stubs. */
3668
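/* A rough sketch (illustrative only, not the linker's exact code) of how the
   ARM emulation in ld/emultempl/armelf.em is expected to drive these entry
   points; ADD_STUB_SECTION and LAYOUT_SECTIONS_AGAIN stand for the callbacks
   the linker supplies:

     if (elf32_arm_setup_section_lists (output_bfd, info) > 0)
       {
         for each input section ISEC, in link order:
           elf32_arm_next_input_section (info, ISEC);

         elf32_arm_size_stubs (output_bfd, stub_bfd, info, group_size,
                               ADD_STUB_SECTION, LAYOUT_SECTIONS_AGAIN);
       }

     elf32_arm_build_stubs (info);     (later, via arm_elf_finish)  */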
3669 /* Set up various things so that we can make a list of input sections
3670 for each output section included in the link. Returns -1 on error,
3671 0 when no stubs will be needed, and 1 on success. */
3672
3673 int
3674 elf32_arm_setup_section_lists (bfd *output_bfd,
3675 struct bfd_link_info *info)
3676 {
3677 bfd *input_bfd;
3678 unsigned int bfd_count;
3679 int top_id, top_index;
3680 asection *section;
3681 asection **input_list, **list;
3682 bfd_size_type amt;
3683 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3684
3685 if (! is_elf_hash_table (htab))
3686 return 0;
3687
3688 /* Count the number of input BFDs and find the top input section id. */
3689 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3690 input_bfd != NULL;
3691 input_bfd = input_bfd->link_next)
3692 {
3693 bfd_count += 1;
3694 for (section = input_bfd->sections;
3695 section != NULL;
3696 section = section->next)
3697 {
3698 if (top_id < section->id)
3699 top_id = section->id;
3700 }
3701 }
3702 htab->bfd_count = bfd_count;
3703
3704 amt = sizeof (struct map_stub) * (top_id + 1);
3705 htab->stub_group = bfd_zmalloc (amt);
3706 if (htab->stub_group == NULL)
3707 return -1;
3708
3709 /* We can't use output_bfd->section_count here to find the top output
3710 section index as some sections may have been removed, and
3711 _bfd_strip_section_from_output doesn't renumber the indices. */
3712 for (section = output_bfd->sections, top_index = 0;
3713 section != NULL;
3714 section = section->next)
3715 {
3716 if (top_index < section->index)
3717 top_index = section->index;
3718 }
3719
3720 htab->top_index = top_index;
3721 amt = sizeof (asection *) * (top_index + 1);
3722 input_list = bfd_malloc (amt);
3723 htab->input_list = input_list;
3724 if (input_list == NULL)
3725 return -1;
3726
3727 /* For sections we aren't interested in, mark their entries with a
3728 value we can check later. */
3729 list = input_list + top_index;
3730 do
3731 *list = bfd_abs_section_ptr;
3732 while (list-- != input_list);
3733
3734 for (section = output_bfd->sections;
3735 section != NULL;
3736 section = section->next)
3737 {
3738 if ((section->flags & SEC_CODE) != 0)
3739 input_list[section->index] = NULL;
3740 }
3741
3742 return 1;
3743 }
3744
3745 /* The linker repeatedly calls this function for each input section,
3746 in the order that input sections are linked into output sections.
3747 Build lists of input sections to determine groupings between which
3748 we may insert linker stubs. */
3749
3750 void
3751 elf32_arm_next_input_section (struct bfd_link_info *info,
3752 asection *isec)
3753 {
3754 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3755
3756 if (isec->output_section->index <= htab->top_index)
3757 {
3758 asection **list = htab->input_list + isec->output_section->index;
3759
3760 if (*list != bfd_abs_section_ptr)
3761 {
3762 /* Steal the link_sec pointer for our list. */
3763 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3764 /* This happens to make the list in reverse order,
3765 which we reverse later. */
3766 PREV_SEC (isec) = *list;
3767 *list = isec;
3768 }
3769 }
3770 }
3771
3772 /* See whether we can group stub sections together. Grouping stub
3773 sections may result in fewer stubs. More importantly, we need to
3774 put all .init* and .fini* stubs at the end of the .init or
3775 .fini output sections respectively, because glibc splits the
3776 _init and _fini functions into multiple parts. Putting a stub in
3777 the middle of a function is not a good idea. */
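/* On return, htab->stub_group[SEC->id].link_sec records the group that
   section SEC was assigned to (it points at the last section of the core
   group); sections sharing a link_sec will share a stub section.  */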
3778
3779 static void
3780 group_sections (struct elf32_arm_link_hash_table *htab,
3781 bfd_size_type stub_group_size,
3782 bfd_boolean stubs_always_after_branch)
3783 {
3784 asection **list = htab->input_list;
3785
3786 do
3787 {
3788 asection *tail = *list;
3789 asection *head;
3790
3791 if (tail == bfd_abs_section_ptr)
3792 continue;
3793
3794 /* Reverse the list: we must avoid placing stubs at the
3795 beginning of the section because the beginning of the text
3796 section may be required for an interrupt vector in bare metal
3797 code. */
3798 #define NEXT_SEC PREV_SEC
3799 head = NULL;
3800 while (tail != NULL)
3801 {
3802 /* Pop from tail. */
3803 asection *item = tail;
3804 tail = PREV_SEC (item);
3805
3806 /* Push on head. */
3807 NEXT_SEC (item) = head;
3808 head = item;
3809 }
3810
3811 while (head != NULL)
3812 {
3813 asection *curr;
3814 asection *next;
3815 bfd_vma stub_group_start = head->output_offset;
3816 bfd_vma end_of_next;
3817
3818 curr = head;
3819 while (NEXT_SEC (curr) != NULL)
3820 {
3821 next = NEXT_SEC (curr);
3822 end_of_next = next->output_offset + next->size;
3823 if (end_of_next - stub_group_start >= stub_group_size)
3824 /* End of NEXT is too far from start, so stop. */
3825 break;
3826 /* Add NEXT to the group. */
3827 curr = next;
3828 }
3829
3830 /* OK, the size from the start to the start of CURR is less
3831 than stub_group_size and thus can be handled by one stub
3832 section. (Or the head section is itself larger than
3833 stub_group_size, in which case we may be toast.)
3834 We should really be keeping track of the total size of
3835 stubs added here, as stubs contribute to the final output
3836 section size. */
3837 do
3838 {
3839 next = NEXT_SEC (head);
3840 /* Set up this stub group. */
3841 htab->stub_group[head->id].link_sec = curr;
3842 }
3843 while (head != curr && (head = next) != NULL);
3844
3845 /* But wait, there's more! Input sections up to stub_group_size
3846 bytes after the stub section can be handled by it too. */
3847 if (!stubs_always_after_branch)
3848 {
3849 stub_group_start = curr->output_offset + curr->size;
3850
3851 while (next != NULL)
3852 {
3853 end_of_next = next->output_offset + next->size;
3854 if (end_of_next - stub_group_start >= stub_group_size)
3855 /* End of NEXT is too far from stubs, so stop. */
3856 break;
3857 /* Add NEXT to the stub group. */
3858 head = next;
3859 next = NEXT_SEC (head);
3860 htab->stub_group[head->id].link_sec = curr;
3861 }
3862 }
3863 head = next;
3864 }
3865 }
3866 while (list++ != htab->input_list + htab->top_index);
3867
3868 free (htab->input_list);
3869 #undef PREV_SEC
3870 #undef NEXT_SEC
3871 }
3872
3873 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3874 erratum fix. */
3875
3876 static int
3877 a8_reloc_compare (const void *a, const void *b)
3878 {
3879 const struct a8_erratum_reloc *ra = a, *rb = b;
3880
3881 if (ra->from < rb->from)
3882 return -1;
3883 else if (ra->from > rb->from)
3884 return 1;
3885 else
3886 return 0;
3887 }
3888
3889 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3890 const char *, char **);
3891
3892 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3893 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3894 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3895 otherwise. */
3896
3897 static bfd_boolean
3898 cortex_a8_erratum_scan (bfd *input_bfd,
3899 struct bfd_link_info *info,
3900 struct a8_erratum_fix **a8_fixes_p,
3901 unsigned int *num_a8_fixes_p,
3902 unsigned int *a8_fix_table_size_p,
3903 struct a8_erratum_reloc *a8_relocs,
3904 unsigned int num_a8_relocs,
3905 unsigned prev_num_a8_fixes,
3906 bfd_boolean *stub_changed_p)
3907 {
3908 asection *section;
3909 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3910 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3911 unsigned int num_a8_fixes = *num_a8_fixes_p;
3912 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3913
3914 for (section = input_bfd->sections;
3915 section != NULL;
3916 section = section->next)
3917 {
3918 bfd_byte *contents = NULL;
3919 struct _arm_elf_section_data *sec_data;
3920 unsigned int span;
3921 bfd_vma base_vma;
3922
3923 if (elf_section_type (section) != SHT_PROGBITS
3924 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3925 || (section->flags & SEC_EXCLUDE) != 0
3926 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3927 || (section->output_section == bfd_abs_section_ptr))
3928 continue;
3929
3930 base_vma = section->output_section->vma + section->output_offset;
3931
3932 if (elf_section_data (section)->this_hdr.contents != NULL)
3933 contents = elf_section_data (section)->this_hdr.contents;
3934 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3935 return TRUE;
3936
3937 sec_data = elf32_arm_section_data (section);
3938
3939 for (span = 0; span < sec_data->mapcount; span++)
3940 {
3941 unsigned int span_start = sec_data->map[span].vma;
3942 unsigned int span_end = (span == sec_data->mapcount - 1)
3943 ? section->size : sec_data->map[span + 1].vma;
3944 unsigned int i;
3945 char span_type = sec_data->map[span].type;
3946 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3947
3948 if (span_type != 't')
3949 continue;
3950
3951 /* Span is entirely within a single 4KB region: skip scanning. */
3952 if (((base_vma + span_start) & ~0xfff)
3953 == ((base_vma + span_end) & ~0xfff))
3954 continue;
3955
3956 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3957
3958 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3959 * The branch target is in the same 4KB region as the
3960 first half of the branch.
3961 * The instruction before the branch is a 32-bit
3962 length non-branch instruction. */
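	  /* An address whose low bits are 0xffe is the last halfword of a
	     4KB region, so a 32-bit instruction starting there straddles
	     the region boundary; that is what the comparison against 0xffe
	     below detects.  */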
3963 for (i = span_start; i < span_end;)
3964 {
3965 unsigned int insn = bfd_getl16 (&contents[i]);
3966 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3967 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3968
3969 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3970 insn_32bit = TRUE;
3971
3972 if (insn_32bit)
3973 {
3974 /* Load the rest of the insn (in manual-friendly order). */
3975 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3976
3977 /* Encoding T4: B<c>.W. */
3978 is_b = (insn & 0xf800d000) == 0xf0009000;
3979 /* Encoding T1: BL<c>.W. */
3980 is_bl = (insn & 0xf800d000) == 0xf000d000;
3981 /* Encoding T2: BLX<c>.W. */
3982 is_blx = (insn & 0xf800d000) == 0xf000c000;
3983 /* Encoding T3: B<c>.W (not permitted in IT block). */
3984 is_bcc = (insn & 0xf800d000) == 0xf0008000
3985 && (insn & 0x07f00000) != 0x03800000;
3986 }
3987
3988 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3989
3990 if (((base_vma + i) & 0xfff) == 0xffe
3991 && insn_32bit
3992 && is_32bit_branch
3993 && last_was_32bit
3994 && ! last_was_branch)
3995 {
3996 bfd_signed_vma offset;
3997 bfd_boolean force_target_arm = FALSE;
3998 bfd_boolean force_target_thumb = FALSE;
3999 bfd_vma target;
4000 enum elf32_arm_stub_type stub_type = arm_stub_none;
4001 struct a8_erratum_reloc key, *found;
4002
4003 key.from = base_vma + i;
4004 found = bsearch (&key, a8_relocs, num_a8_relocs,
4005 sizeof (struct a8_erratum_reloc),
4006 &a8_reloc_compare);
4007
4008 if (found)
4009 {
4010 char *error_message = NULL;
4011 struct elf_link_hash_entry *entry;
4012
4013 /* We don't care about the error returned from this
4014 function, only whether or not there is glue. */
4015 entry = find_thumb_glue (info, found->sym_name,
4016 &error_message);
4017
4018 if (entry)
4019 found->non_a8_stub = TRUE;
4020
4021 if (found->r_type == R_ARM_THM_CALL
4022 && found->st_type != STT_ARM_TFUNC)
4023 force_target_arm = TRUE;
4024 else if (found->r_type == R_ARM_THM_CALL
4025 && found->st_type == STT_ARM_TFUNC)
4026 force_target_thumb = TRUE;
4027 }
4028
4029 /* Check if we have an offending branch instruction. */
4030
4031 if (found && found->non_a8_stub)
4032 /* We've already made a stub for this instruction, e.g.
4033 it's a long branch or a Thumb->ARM stub. Assume that
4034 stub will suffice to work around the A8 erratum (see
4035 the setting of stubs_always_after_branch in elf32_arm_size_stubs). */
4036 ;
4037 else if (is_bcc)
4038 {
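                  /* Reconstruct the signed 21-bit branch offset from the
                     immediate fields of the B<c>.W (encoding T3)
                     instruction.  */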
4039 offset = (insn & 0x7ff) << 1;
4040 offset |= (insn & 0x3f0000) >> 4;
4041 offset |= (insn & 0x2000) ? 0x40000 : 0;
4042 offset |= (insn & 0x800) ? 0x80000 : 0;
4043 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4044 if (offset & 0x100000)
4045 offset |= ~ ((bfd_signed_vma) 0xfffff);
4046 stub_type = arm_stub_a8_veneer_b_cond;
4047 }
4048 else if (is_b || is_bl || is_blx)
4049 {
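                  /* Reconstruct the signed 25-bit branch offset from the
                     S/J1/J2/imm fields of encodings T1/T2/T4, where
                     I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S).  */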
4050 int s = (insn & 0x4000000) != 0;
4051 int j1 = (insn & 0x2000) != 0;
4052 int j2 = (insn & 0x800) != 0;
4053 int i1 = !(j1 ^ s);
4054 int i2 = !(j2 ^ s);
4055
4056 offset = (insn & 0x7ff) << 1;
4057 offset |= (insn & 0x3ff0000) >> 4;
4058 offset |= i2 << 22;
4059 offset |= i1 << 23;
4060 offset |= s << 24;
4061 if (offset & 0x1000000)
4062 offset |= ~ ((bfd_signed_vma) 0xffffff);
4063
4064 if (is_blx)
4065 offset &= ~ ((bfd_signed_vma) 3);
4066
4067 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4068 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4069 }
4070
4071 if (stub_type != arm_stub_none)
4072 {
4073 bfd_vma pc_for_insn = base_vma + i + 4;
4074
4075 /* The original instruction is a BL, but the target is
4076 an ARM instruction. If we were not making a stub,
4077 the BL would have been converted to a BLX. Use the
4078 BLX stub instead in that case. */
4079 if (htab->use_blx && force_target_arm
4080 && stub_type == arm_stub_a8_veneer_bl)
4081 {
4082 stub_type = arm_stub_a8_veneer_blx;
4083 is_blx = TRUE;
4084 is_bl = FALSE;
4085 }
4086 /* Conversely, if the original instruction was
4087 BLX but the target is Thumb mode, use the BL
4088 stub. */
4089 else if (force_target_thumb
4090 && stub_type == arm_stub_a8_veneer_blx)
4091 {
4092 stub_type = arm_stub_a8_veneer_bl;
4093 is_blx = FALSE;
4094 is_bl = TRUE;
4095 }
4096
4097 if (is_blx)
4098 pc_for_insn &= ~ ((bfd_vma) 3);
4099
4100 /* If we found a relocation, use the proper destination,
4101 not the offset in the (unrelocated) instruction.
4102 Note this is always done if we switched the stub type
4103 above. */
4104 if (found)
4105 offset =
4106 (bfd_signed_vma) (found->destination - pc_for_insn);
4107
4108 target = pc_for_insn + offset;
4109
4110 /* The BLX stub is ARM-mode code. Adjust the offset to
4111 take the different PC value (+8 instead of +4) into
4112 account. */
4113 if (stub_type == arm_stub_a8_veneer_blx)
4114 offset += 4;
4115
4116 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4117 {
4118 char *stub_name = NULL;
4119
4120 if (num_a8_fixes == a8_fix_table_size)
4121 {
4122 a8_fix_table_size *= 2;
4123 a8_fixes = bfd_realloc (a8_fixes,
4124 sizeof (struct a8_erratum_fix)
4125 * a8_fix_table_size);
4126 }
4127
4128 if (num_a8_fixes < prev_num_a8_fixes)
4129 {
4130 /* If we're doing a subsequent scan,
4131 check if we've found the same fix as
4132 before, and try and reuse the stub
4133 name. */
4134 stub_name = a8_fixes[num_a8_fixes].stub_name;
4135 if ((a8_fixes[num_a8_fixes].section != section)
4136 || (a8_fixes[num_a8_fixes].offset != i))
4137 {
4138 free (stub_name);
4139 stub_name = NULL;
4140 *stub_changed_p = TRUE;
4141 }
4142 }
4143
4144 if (!stub_name)
4145 {
4146 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4147 if (stub_name != NULL)
4148 sprintf (stub_name, "%x:%x", section->id, i);
4149 }
4150
4151 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4152 a8_fixes[num_a8_fixes].section = section;
4153 a8_fixes[num_a8_fixes].offset = i;
4154 a8_fixes[num_a8_fixes].addend = offset;
4155 a8_fixes[num_a8_fixes].orig_insn = insn;
4156 a8_fixes[num_a8_fixes].stub_name = stub_name;
4157 a8_fixes[num_a8_fixes].stub_type = stub_type;
4158
4159 num_a8_fixes++;
4160 }
4161 }
4162 }
4163
4164 i += insn_32bit ? 4 : 2;
4165 last_was_32bit = insn_32bit;
4166 last_was_branch = is_32bit_branch;
4167 }
4168 }
4169
4170 if (elf_section_data (section)->this_hdr.contents == NULL)
4171 free (contents);
4172 }
4173
4174 *a8_fixes_p = a8_fixes;
4175 *num_a8_fixes_p = num_a8_fixes;
4176 *a8_fix_table_size_p = a8_fix_table_size;
4177
4178 return FALSE;
4179 }
4180
4181 /* Determine and set the size of the stub section for a final link.
4182
4183 The basic idea here is to examine all the relocations looking for
4184 PC-relative calls to a target that is unreachable with a "bl"
4185 instruction. */
4186
4187 bfd_boolean
4188 elf32_arm_size_stubs (bfd *output_bfd,
4189 bfd *stub_bfd,
4190 struct bfd_link_info *info,
4191 bfd_signed_vma group_size,
4192 asection * (*add_stub_section) (const char *, asection *),
4193 void (*layout_sections_again) (void))
4194 {
4195 bfd_size_type stub_group_size;
4196 bfd_boolean stubs_always_after_branch;
4197 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4198 struct a8_erratum_fix *a8_fixes = NULL;
4199 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4200 struct a8_erratum_reloc *a8_relocs = NULL;
4201 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4202
4203 if (htab->fix_cortex_a8)
4204 {
4205 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4206 * a8_fix_table_size);
4207 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4208 * a8_reloc_table_size);
4209 }
4210
4211 /* Propagate mach to stub bfd, because it may not have been
4212 finalized when we created stub_bfd. */
4213 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4214 bfd_get_mach (output_bfd));
4215
4216 /* Stash our params away. */
4217 htab->stub_bfd = stub_bfd;
4218 htab->add_stub_section = add_stub_section;
4219 htab->layout_sections_again = layout_sections_again;
4220 stubs_always_after_branch = group_size < 0;
4221
4222 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4223 as the first half of a 32-bit branch straddling two 4K pages. This is a
4224 crude way of enforcing that. */
4225 if (htab->fix_cortex_a8)
4226 stubs_always_after_branch = 1;
4227
4228 if (group_size < 0)
4229 stub_group_size = -group_size;
4230 else
4231 stub_group_size = group_size;
4232
4233 if (stub_group_size == 1)
4234 {
4235 /* Default values. */
4236 /* The Thumb branch range of +-4MB has to be used as the default
4237 maximum size (a given section can contain both ARM and Thumb
4238 code, so the worst case has to be taken into account).
4239
4240 This value is 24K less than that, which allows for 2025
4241 12-byte stubs. If we exceed that, then we will fail to link.
4242 The user will have to relink with an explicit group size
4243 option. */
4244 stub_group_size = 4170000;
4245 }
4246
4247 group_sections (htab, stub_group_size, stubs_always_after_branch);
4248
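  /* Iterate until no new stubs are needed: each pass rescans every branch
     reloc, creates any stub entries that are still missing, recomputes the
     stub section sizes and asks the linker to lay sections out again.  */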
4249 while (1)
4250 {
4251 bfd *input_bfd;
4252 unsigned int bfd_indx;
4253 asection *stub_sec;
4254 bfd_boolean stub_changed = FALSE;
4255 unsigned prev_num_a8_fixes = num_a8_fixes;
4256
4257 num_a8_fixes = 0;
4258 for (input_bfd = info->input_bfds, bfd_indx = 0;
4259 input_bfd != NULL;
4260 input_bfd = input_bfd->link_next, bfd_indx++)
4261 {
4262 Elf_Internal_Shdr *symtab_hdr;
4263 asection *section;
4264 Elf_Internal_Sym *local_syms = NULL;
4265
4266 num_a8_relocs = 0;
4267
4268 /* We'll need the symbol table in a second. */
4269 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4270 if (symtab_hdr->sh_info == 0)
4271 continue;
4272
4273 /* Walk over each section attached to the input bfd. */
4274 for (section = input_bfd->sections;
4275 section != NULL;
4276 section = section->next)
4277 {
4278 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4279
4280 /* If there aren't any relocs, then there's nothing more
4281 to do. */
4282 if ((section->flags & SEC_RELOC) == 0
4283 || section->reloc_count == 0
4284 || (section->flags & SEC_CODE) == 0)
4285 continue;
4286
4287 /* If this section is a link-once section that will be
4288 discarded, then don't create any stubs. */
4289 if (section->output_section == NULL
4290 || section->output_section->owner != output_bfd)
4291 continue;
4292
4293 /* Get the relocs. */
4294 internal_relocs
4295 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4296 NULL, info->keep_memory);
4297 if (internal_relocs == NULL)
4298 goto error_ret_free_local;
4299
4300 /* Now examine each relocation. */
4301 irela = internal_relocs;
4302 irelaend = irela + section->reloc_count;
4303 for (; irela < irelaend; irela++)
4304 {
4305 unsigned int r_type, r_indx;
4306 enum elf32_arm_stub_type stub_type;
4307 struct elf32_arm_stub_hash_entry *stub_entry;
4308 asection *sym_sec;
4309 bfd_vma sym_value;
4310 bfd_vma destination;
4311 struct elf32_arm_link_hash_entry *hash;
4312 const char *sym_name;
4313 char *stub_name;
4314 const asection *id_sec;
4315 unsigned char st_type;
4316 bfd_boolean created_stub = FALSE;
4317
4318 r_type = ELF32_R_TYPE (irela->r_info);
4319 r_indx = ELF32_R_SYM (irela->r_info);
4320
4321 if (r_type >= (unsigned int) R_ARM_max)
4322 {
4323 bfd_set_error (bfd_error_bad_value);
4324 error_ret_free_internal:
4325 if (elf_section_data (section)->relocs == NULL)
4326 free (internal_relocs);
4327 goto error_ret_free_local;
4328 }
4329
4330 /* Only look for stubs on branch instructions. */
4331 if ((r_type != (unsigned int) R_ARM_CALL)
4332 && (r_type != (unsigned int) R_ARM_THM_CALL)
4333 && (r_type != (unsigned int) R_ARM_JUMP24)
4334 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4335 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4336 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4337 && (r_type != (unsigned int) R_ARM_PLT32))
4338 continue;
4339
4340 /* Now determine the call target, its name, value,
4341 section. */
4342 sym_sec = NULL;
4343 sym_value = 0;
4344 destination = 0;
4345 hash = NULL;
4346 sym_name = NULL;
4347 if (r_indx < symtab_hdr->sh_info)
4348 {
4349 /* It's a local symbol. */
4350 Elf_Internal_Sym *sym;
4351 Elf_Internal_Shdr *hdr;
4352
4353 if (local_syms == NULL)
4354 {
4355 local_syms
4356 = (Elf_Internal_Sym *) symtab_hdr->contents;
4357 if (local_syms == NULL)
4358 local_syms
4359 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4360 symtab_hdr->sh_info, 0,
4361 NULL, NULL, NULL);
4362 if (local_syms == NULL)
4363 goto error_ret_free_internal;
4364 }
4365
4366 sym = local_syms + r_indx;
4367 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4368 sym_sec = hdr->bfd_section;
4369 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4370 sym_value = sym->st_value;
4371 destination = (sym_value + irela->r_addend
4372 + sym_sec->output_offset
4373 + sym_sec->output_section->vma);
4374 st_type = ELF_ST_TYPE (sym->st_info);
4375 sym_name
4376 = bfd_elf_string_from_elf_section (input_bfd,
4377 symtab_hdr->sh_link,
4378 sym->st_name);
4379 }
4380 else
4381 {
4382 /* It's an external symbol. */
4383 int e_indx;
4384
4385 e_indx = r_indx - symtab_hdr->sh_info;
4386 hash = ((struct elf32_arm_link_hash_entry *)
4387 elf_sym_hashes (input_bfd)[e_indx]);
4388
4389 while (hash->root.root.type == bfd_link_hash_indirect
4390 || hash->root.root.type == bfd_link_hash_warning)
4391 hash = ((struct elf32_arm_link_hash_entry *)
4392 hash->root.root.u.i.link);
4393
4394 if (hash->root.root.type == bfd_link_hash_defined
4395 || hash->root.root.type == bfd_link_hash_defweak)
4396 {
4397 sym_sec = hash->root.root.u.def.section;
4398 sym_value = hash->root.root.u.def.value;
4399
4400 struct elf32_arm_link_hash_table *globals =
4401 elf32_arm_hash_table (info);
4402
4403 /* For a destination in a shared library,
4404 use the PLT stub as target address to
4405 decide whether a branch stub is
4406 needed. */
4407 if (globals->splt != NULL && hash != NULL
4408 && hash->root.plt.offset != (bfd_vma) -1)
4409 {
4410 sym_sec = globals->splt;
4411 sym_value = hash->root.plt.offset;
4412 if (sym_sec->output_section != NULL)
4413 destination = (sym_value
4414 + sym_sec->output_offset
4415 + sym_sec->output_section->vma);
4416 }
4417 else if (sym_sec->output_section != NULL)
4418 destination = (sym_value + irela->r_addend
4419 + sym_sec->output_offset
4420 + sym_sec->output_section->vma);
4421 }
4422 else if ((hash->root.root.type == bfd_link_hash_undefined)
4423 || (hash->root.root.type == bfd_link_hash_undefweak))
4424 {
4425 /* For a shared library, use the PLT stub as
4426 target address to decide whether a long
4427 branch stub is needed.
4428 For absolute code, such branches cannot be handled. */
4429 struct elf32_arm_link_hash_table *globals =
4430 elf32_arm_hash_table (info);
4431
4432 if (globals->splt != NULL && hash != NULL
4433 && hash->root.plt.offset != (bfd_vma) -1)
4434 {
4435 sym_sec = globals->splt;
4436 sym_value = hash->root.plt.offset;
4437 if (sym_sec->output_section != NULL)
4438 destination = (sym_value
4439 + sym_sec->output_offset
4440 + sym_sec->output_section->vma);
4441 }
4442 else
4443 continue;
4444 }
4445 else
4446 {
4447 bfd_set_error (bfd_error_bad_value);
4448 goto error_ret_free_internal;
4449 }
4450 st_type = ELF_ST_TYPE (hash->root.type);
4451 sym_name = hash->root.root.root.string;
4452 }
4453
4454 do
4455 {
4456 /* Determine what (if any) linker stub is needed. */
4457 stub_type = arm_type_of_stub (info, section, irela,
4458 st_type, hash,
4459 destination, sym_sec,
4460 input_bfd, sym_name);
4461 if (stub_type == arm_stub_none)
4462 break;
4463
4464 /* Support for grouping stub sections. */
4465 id_sec = htab->stub_group[section->id].link_sec;
4466
4467 /* Get the name of this stub. */
4468 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4469 irela);
4470 if (!stub_name)
4471 goto error_ret_free_internal;
4472
4473 /* We've either created a stub for this reloc already,
4474 or we are about to. */
4475 created_stub = TRUE;
4476
4477 stub_entry = arm_stub_hash_lookup
4478 (&htab->stub_hash_table, stub_name,
4479 FALSE, FALSE);
4480 if (stub_entry != NULL)
4481 {
4482 /* The proper stub has already been created. */
4483 free (stub_name);
4484 stub_entry->target_value = sym_value;
4485 break;
4486 }
4487
4488 stub_entry = elf32_arm_add_stub (stub_name, section,
4489 htab);
4490 if (stub_entry == NULL)
4491 {
4492 free (stub_name);
4493 goto error_ret_free_internal;
4494 }
4495
4496 stub_entry->target_value = sym_value;
4497 stub_entry->target_section = sym_sec;
4498 stub_entry->stub_type = stub_type;
4499 stub_entry->h = hash;
4500 stub_entry->st_type = st_type;
4501
4502 if (sym_name == NULL)
4503 sym_name = "unnamed";
4504 stub_entry->output_name
4505 = bfd_alloc (htab->stub_bfd,
4506 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4507 + strlen (sym_name));
4508 if (stub_entry->output_name == NULL)
4509 {
4510 free (stub_name);
4511 goto error_ret_free_internal;
4512 }
4513
4514 /* For historical reasons, use the existing names for
4515 ARM-to-Thumb and Thumb-to-ARM stubs. */
4516 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4517 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4518 && st_type != STT_ARM_TFUNC)
4519 sprintf (stub_entry->output_name,
4520 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4521 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4522 || (r_type == (unsigned int) R_ARM_JUMP24))
4523 && st_type == STT_ARM_TFUNC)
4524 sprintf (stub_entry->output_name,
4525 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4526 else
4527 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4528 sym_name);
4529
4530 stub_changed = TRUE;
4531 }
4532 while (0);
4533
4534 /* Look for relocations which might trigger Cortex-A8
4535 erratum. */
4536 if (htab->fix_cortex_a8
4537 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4538 || r_type == (unsigned int) R_ARM_THM_JUMP19
4539 || r_type == (unsigned int) R_ARM_THM_CALL
4540 || r_type == (unsigned int) R_ARM_THM_XPC22))
4541 {
4542 bfd_vma from = section->output_section->vma
4543 + section->output_offset
4544 + irela->r_offset;
4545
4546 if ((from & 0xfff) == 0xffe)
4547 {
4548 /* Found a candidate. Note we haven't checked here that the
4549 destination is within 4K: if we did (and therefore didn't
4550 create an entry in a8_relocs), we couldn't tell during the
4551 later scan that the branch should have been relocated. */
4553 if (num_a8_relocs == a8_reloc_table_size)
4554 {
4555 a8_reloc_table_size *= 2;
4556 a8_relocs = bfd_realloc (a8_relocs,
4557 sizeof (struct a8_erratum_reloc)
4558 * a8_reloc_table_size);
4559 }
4560
4561 a8_relocs[num_a8_relocs].from = from;
4562 a8_relocs[num_a8_relocs].destination = destination;
4563 a8_relocs[num_a8_relocs].r_type = r_type;
4564 a8_relocs[num_a8_relocs].st_type = st_type;
4565 a8_relocs[num_a8_relocs].sym_name = sym_name;
4566 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4567
4568 num_a8_relocs++;
4569 }
4570 }
4571 }
4572
4573 /* We're done with the internal relocs, free them. */
4574 if (elf_section_data (section)->relocs == NULL)
4575 free (internal_relocs);
4576 }
4577
4578 if (htab->fix_cortex_a8)
4579 {
4580 /* Sort relocs which might apply to Cortex-A8 erratum. */
4581 qsort (a8_relocs, num_a8_relocs,
4582 sizeof (struct a8_erratum_reloc),
4583 &a8_reloc_compare);
4584
4585 /* Scan for branches which might trigger Cortex-A8 erratum. */
4586 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4587 &num_a8_fixes, &a8_fix_table_size,
4588 a8_relocs, num_a8_relocs,
4589 prev_num_a8_fixes, &stub_changed)
4590 != 0)
4591 goto error_ret_free_local;
4592 }
4593 }
4594
4595 if (prev_num_a8_fixes != num_a8_fixes)
4596 stub_changed = TRUE;
4597
4598 if (!stub_changed)
4599 break;
4600
4601 /* OK, we've added some stubs. Find out the new size of the
4602 stub sections. */
4603 for (stub_sec = htab->stub_bfd->sections;
4604 stub_sec != NULL;
4605 stub_sec = stub_sec->next)
4606 {
4607 /* Ignore non-stub sections. */
4608 if (!strstr (stub_sec->name, STUB_SUFFIX))
4609 continue;
4610
4611 stub_sec->size = 0;
4612 }
4613
4614 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4615
4616 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4617 if (htab->fix_cortex_a8)
4618 for (i = 0; i < num_a8_fixes; i++)
4619 {
4620 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4621 a8_fixes[i].section, htab);
4622
4623 if (stub_sec == NULL)
4624 goto error_ret_free_local;
4625
4626 stub_sec->size
4627 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4628 NULL);
4629 }
4630
4631
4632 /* Ask the linker to do its stuff. */
4633 (*htab->layout_sections_again) ();
4634 }
4635
4636 /* Add stubs for Cortex-A8 erratum fixes now. */
4637 if (htab->fix_cortex_a8)
4638 {
4639 for (i = 0; i < num_a8_fixes; i++)
4640 {
4641 struct elf32_arm_stub_hash_entry *stub_entry;
4642 char *stub_name = a8_fixes[i].stub_name;
4643 asection *section = a8_fixes[i].section;
4644 unsigned int section_id = a8_fixes[i].section->id;
4645 asection *link_sec = htab->stub_group[section_id].link_sec;
4646 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4647 const insn_sequence *template;
4648 int template_size, size = 0;
4649
4650 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4651 TRUE, FALSE);
4652 if (stub_entry == NULL)
4653 {
4654 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4655 section->owner,
4656 stub_name);
4657 return FALSE;
4658 }
4659
4660 stub_entry->stub_sec = stub_sec;
4661 stub_entry->stub_offset = 0;
4662 stub_entry->id_sec = link_sec;
4663 stub_entry->stub_type = a8_fixes[i].stub_type;
4664 stub_entry->target_section = a8_fixes[i].section;
4665 stub_entry->target_value = a8_fixes[i].offset;
4666 stub_entry->target_addend = a8_fixes[i].addend;
4667 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4668 stub_entry->st_type = STT_ARM_TFUNC;
4669
4670 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4671 &template_size);
4672
4673 stub_entry->stub_size = size;
4674 stub_entry->stub_template = template;
4675 stub_entry->stub_template_size = template_size;
4676 }
4677
4678 /* Stash the Cortex-A8 erratum fix array for use later in
4679 elf32_arm_write_section(). */
4680 htab->a8_erratum_fixes = a8_fixes;
4681 htab->num_a8_erratum_fixes = num_a8_fixes;
4682 }
4683 else
4684 {
4685 htab->a8_erratum_fixes = NULL;
4686 htab->num_a8_erratum_fixes = 0;
4687 }
4688 return TRUE;
4689
4690 error_ret_free_local:
4691 return FALSE;
4692 }
4693
4694 /* Build all the stubs associated with the current output file. The
4695 stubs are kept in a hash table attached to the main linker hash
4696 table. We also set up the .plt entries for statically linked PIC
4697 functions here. This function is called via arm_elf_finish in the
4698 linker. */
4699
4700 bfd_boolean
4701 elf32_arm_build_stubs (struct bfd_link_info *info)
4702 {
4703 asection *stub_sec;
4704 struct bfd_hash_table *table;
4705 struct elf32_arm_link_hash_table *htab;
4706
4707 htab = elf32_arm_hash_table (info);
4708
4709 for (stub_sec = htab->stub_bfd->sections;
4710 stub_sec != NULL;
4711 stub_sec = stub_sec->next)
4712 {
4713 bfd_size_type size;
4714
4715 /* Ignore non-stub sections. */
4716 if (!strstr (stub_sec->name, STUB_SUFFIX))
4717 continue;
4718
4719 /* Allocate memory to hold the linker stubs. */
4720 size = stub_sec->size;
4721 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4722 if (stub_sec->contents == NULL && size != 0)
4723 return FALSE;
4724 stub_sec->size = 0;
4725 }
4726
4727 /* Build the stubs as directed by the stub hash table. */
4728 table = &htab->stub_hash_table;
4729 bfd_hash_traverse (table, arm_build_one_stub, info);
4730 if (htab->fix_cortex_a8)
4731 {
4732 /* Place the cortex a8 stubs last. */
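      /* arm_build_one_stub emits the Cortex-A8 veneers only when
	 fix_cortex_a8 is negative (and skips everything else then), so this
	 second traversal writes just those veneers, after all other
	 stubs.  */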
4733 htab->fix_cortex_a8 = -1;
4734 bfd_hash_traverse (table, arm_build_one_stub, info);
4735 }
4736
4737 return TRUE;
4738 }
4739
4740 /* Locate the Thumb encoded calling stub for NAME. */
4741
4742 static struct elf_link_hash_entry *
4743 find_thumb_glue (struct bfd_link_info *link_info,
4744 const char *name,
4745 char **error_message)
4746 {
4747 char *tmp_name;
4748 struct elf_link_hash_entry *hash;
4749 struct elf32_arm_link_hash_table *hash_table;
4750
4751 /* We need a pointer to the armelf specific hash table. */
4752 hash_table = elf32_arm_hash_table (link_info);
4753
4754 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4755 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4756
4757 BFD_ASSERT (tmp_name);
4758
4759 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4760
4761 hash = elf_link_hash_lookup
4762 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4763
4764 if (hash == NULL
4765 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4766 tmp_name, name) == -1)
4767 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4768
4769 free (tmp_name);
4770
4771 return hash;
4772 }
4773
4774 /* Locate the ARM encoded calling stub for NAME. */
4775
4776 static struct elf_link_hash_entry *
4777 find_arm_glue (struct bfd_link_info *link_info,
4778 const char *name,
4779 char **error_message)
4780 {
4781 char *tmp_name;
4782 struct elf_link_hash_entry *myh;
4783 struct elf32_arm_link_hash_table *hash_table;
4784
4785 /* We need a pointer to the elfarm specific hash table. */
4786 hash_table = elf32_arm_hash_table (link_info);
4787
4788 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4789 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4790
4791 BFD_ASSERT (tmp_name);
4792
4793 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4794
4795 myh = elf_link_hash_lookup
4796 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4797
4798 if (myh == NULL
4799 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4800 tmp_name, name) == -1)
4801 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4802
4803 free (tmp_name);
4804
4805 return myh;
4806 }
4807
4808 /* ARM->Thumb glue (static images):
4809
4810 .arm
4811 __func_from_arm:
4812 ldr r12, __func_addr
4813 bx r12
4814 __func_addr:
4815 .word func @ behave as if you saw an ARM_32 reloc.
4816
4817 (v5t static images)
4818 .arm
4819 __func_from_arm:
4820 ldr pc, __func_addr
4821 __func_addr:
4822 .word func @ behave as if you saw an ARM_32 reloc.
4823
4824 (relocatable images)
4825 .arm
4826 __func_from_arm:
4827 ldr r12, __func_offset
4828 add r12, r12, pc
4829 bx r12
4830 __func_offset:
4831 .word func - . */
4832
4833 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4834 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4835 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4836 static const insn32 a2t3_func_addr_insn = 0x00000001;
4837
4838 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4839 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4840 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4841
4842 #define ARM2THUMB_PIC_GLUE_SIZE 16
4843 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4844 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4845 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4846
4847 /* Thumb->ARM:                        Thumb->(non-interworking aware) ARM
4848 
4849      .thumb                           .thumb
4850      .align 2                         .align 2
4851  __func_from_thumb:                __func_from_thumb:
4852      bx pc                            push {r6, lr}
4853      nop                              ldr r6, __func_addr
4854      .arm                             mov lr, pc
4855      b func                           bx r6
4856                                       .arm
4857                                     ;; back_to_thumb
4858                                       ldmia r13!, {r6, lr}
4859                                       bx lr
4860                                    __func_addr:
4861                                       .word func  */
4862
4863 #define THUMB2ARM_GLUE_SIZE 8
4864 static const insn16 t2a1_bx_pc_insn = 0x4778;
4865 static const insn16 t2a2_noop_insn = 0x46c0;
4866 static const insn32 t2a3_b_insn = 0xea000000;
4867
4868 #define VFP11_ERRATUM_VENEER_SIZE 8
4869
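/* ARMv4 BX veneer.  The register field is r0 in the encodings below; the
   register used by the original BX instruction is presumably substituted
   when the veneer is written out:

   .arm
	tst	rN, #1
	moveq	pc, rN
	bx	rN  */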
4870 #define ARM_BX_VENEER_SIZE 12
4871 static const insn32 armbx1_tst_insn = 0xe3100001;
4872 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4873 static const insn32 armbx3_bx_insn = 0xe12fff10;
4874
4875 #ifndef ELFARM_NABI_C_INCLUDED
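/* Allocate the contents of the glue section called NAME in ABFD, or mark
   the section for exclusion from the output if no glue of that kind turned
   out to be needed (SIZE == 0).  */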
4876 static void
4877 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4878 {
4879 asection * s;
4880 bfd_byte * contents;
4881
4882 if (size == 0)
4883 {
4884 /* Do not include empty glue sections in the output. */
4885 if (abfd != NULL)
4886 {
4887 s = bfd_get_section_by_name (abfd, name);
4888 if (s != NULL)
4889 s->flags |= SEC_EXCLUDE;
4890 }
4891 return;
4892 }
4893
4894 BFD_ASSERT (abfd != NULL);
4895
4896 s = bfd_get_section_by_name (abfd, name);
4897 BFD_ASSERT (s != NULL);
4898
4899 contents = bfd_alloc (abfd, size);
4900
4901 BFD_ASSERT (s->size == size);
4902 s->contents = contents;
4903 }
4904
4905 bfd_boolean
4906 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4907 {
4908 struct elf32_arm_link_hash_table * globals;
4909
4910 globals = elf32_arm_hash_table (info);
4911 BFD_ASSERT (globals != NULL);
4912
4913 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4914 globals->arm_glue_size,
4915 ARM2THUMB_GLUE_SECTION_NAME);
4916
4917 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4918 globals->thumb_glue_size,
4919 THUMB2ARM_GLUE_SECTION_NAME);
4920
4921 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4922 globals->vfp11_erratum_glue_size,
4923 VFP11_ERRATUM_VENEER_SECTION_NAME);
4924
4925 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4926 globals->bx_glue_size,
4927 ARM_BX_GLUE_SECTION_NAME);
4928
4929 return TRUE;
4930 }
4931
4932 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4933 Returns the symbol identifying the stub. */
4934
4935 static struct elf_link_hash_entry *
4936 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4937 struct elf_link_hash_entry * h)
4938 {
4939 const char * name = h->root.root.string;
4940 asection * s;
4941 char * tmp_name;
4942 struct elf_link_hash_entry * myh;
4943 struct bfd_link_hash_entry * bh;
4944 struct elf32_arm_link_hash_table * globals;
4945 bfd_vma val;
4946 bfd_size_type size;
4947
4948 globals = elf32_arm_hash_table (link_info);
4949
4950 BFD_ASSERT (globals != NULL);
4951 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4952
4953 s = bfd_get_section_by_name
4954 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4955
4956 BFD_ASSERT (s != NULL);
4957
4958 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4959
4960 BFD_ASSERT (tmp_name);
4961
4962 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4963
4964 myh = elf_link_hash_lookup
4965 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4966
4967 if (myh != NULL)
4968 {
4969 /* We've already seen this guy. */
4970 free (tmp_name);
4971 return myh;
4972 }
4973
4974 /* The only trick here is using hash_table->arm_glue_size as the value.
4975 Even though the section isn't allocated yet, this is where we will be
4976 putting it. The +1 on the value marks that the stub has not been
4977 output yet - not that it is a Thumb function. */
4978 bh = NULL;
4979 val = globals->arm_glue_size + 1;
4980 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4981 tmp_name, BSF_GLOBAL, s, val,
4982 NULL, TRUE, FALSE, &bh);
4983
4984 myh = (struct elf_link_hash_entry *) bh;
4985 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4986 myh->forced_local = 1;
4987
4988 free (tmp_name);
4989
4990 if (link_info->shared || globals->root.is_relocatable_executable
4991 || globals->pic_veneer)
4992 size = ARM2THUMB_PIC_GLUE_SIZE;
4993 else if (globals->use_blx)
4994 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4995 else
4996 size = ARM2THUMB_STATIC_GLUE_SIZE;
4997
4998 s->size += size;
4999 globals->arm_glue_size += size;
5000
5001 return myh;
5002 }
5003
5004 /* Allocate space for ARMv4 BX veneers. */
5005
5006 static void
5007 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5008 {
5009 asection * s;
5010 struct elf32_arm_link_hash_table *globals;
5011 char *tmp_name;
5012 struct elf_link_hash_entry *myh;
5013 struct bfd_link_hash_entry *bh;
5014 bfd_vma val;
5015
5016 /* BX PC does not need a veneer. */
5017 if (reg == 15)
5018 return;
5019
5020 globals = elf32_arm_hash_table (link_info);
5021
5022 BFD_ASSERT (globals != NULL);
5023 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5024
5025 /* Check if this veneer has already been allocated. */
5026 if (globals->bx_glue_offset[reg])
5027 return;
5028
5029 s = bfd_get_section_by_name
5030 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5031
5032 BFD_ASSERT (s != NULL);
5033
5034 /* Add symbol for veneer. */
5035 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5036
5037 BFD_ASSERT (tmp_name);
5038
5039 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5040
5041 myh = elf_link_hash_lookup
5042 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5043
5044 BFD_ASSERT (myh == NULL);
5045
5046 bh = NULL;
5047 val = globals->bx_glue_size;
5048 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5049 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5050 NULL, TRUE, FALSE, &bh);
5051
5052 myh = (struct elf_link_hash_entry *) bh;
5053 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5054 myh->forced_local = 1;
5055
5056 s->size += ARM_BX_VENEER_SIZE;
5057 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5058 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5059 }
5060
5061
5062 /* Add an entry to the code/data map for section SEC. */
5063
5064 static void
5065 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5066 {
5067 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5068 unsigned int newidx;
5069
5070 if (sec_data->map == NULL)
5071 {
5072 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5073 sec_data->mapcount = 0;
5074 sec_data->mapsize = 1;
5075 }
5076
5077 newidx = sec_data->mapcount++;
5078
5079 if (sec_data->mapcount > sec_data->mapsize)
5080 {
5081 sec_data->mapsize *= 2;
5082 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5083 * sizeof (elf32_arm_section_map));
5084 }
5085
5086 if (sec_data->map)
5087 {
5088 sec_data->map[newidx].vma = vma;
5089 sec_data->map[newidx].type = type;
5090 }
5091 }
5092
5093
5094 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5095 veneers are handled for now. */
5096
5097 static bfd_vma
5098 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5099 elf32_vfp11_erratum_list *branch,
5100 bfd *branch_bfd,
5101 asection *branch_sec,
5102 unsigned int offset)
5103 {
5104 asection *s;
5105 struct elf32_arm_link_hash_table *hash_table;
5106 char *tmp_name;
5107 struct elf_link_hash_entry *myh;
5108 struct bfd_link_hash_entry *bh;
5109 bfd_vma val;
5110 struct _arm_elf_section_data *sec_data;
5111 int errcount;
5112 elf32_vfp11_erratum_list *newerr;
5113
5114 hash_table = elf32_arm_hash_table (link_info);
5115
5116 BFD_ASSERT (hash_table != NULL);
5117 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5118
5119 s = bfd_get_section_by_name
5120 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5121
5122 BFD_ASSERT (s != NULL);
5123 
5124 sec_data = elf32_arm_section_data (s);
5125
5126 tmp_name = bfd_malloc ((bfd_size_type) strlen
5127 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5128
5129 BFD_ASSERT (tmp_name);
5130
5131 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5132 hash_table->num_vfp11_fixes);
5133
5134 myh = elf_link_hash_lookup
5135 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5136
5137 BFD_ASSERT (myh == NULL);
5138
5139 bh = NULL;
5140 val = hash_table->vfp11_erratum_glue_size;
5141 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5142 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5143 NULL, TRUE, FALSE, &bh);
5144
5145 myh = (struct elf_link_hash_entry *) bh;
5146 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5147 myh->forced_local = 1;
5148
5149 /* Link veneer back to calling location. */
5150 errcount = ++(sec_data->erratumcount);
5151 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5152
5153 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5154 newerr->vma = -1;
5155 newerr->u.v.branch = branch;
5156 newerr->u.v.id = hash_table->num_vfp11_fixes;
5157 branch->u.b.veneer = newerr;
5158
5159 newerr->next = sec_data->erratumlist;
5160 sec_data->erratumlist = newerr;
5161
5162 /* A symbol for the return from the veneer. */
5163 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5164 hash_table->num_vfp11_fixes);
5165
5166 myh = elf_link_hash_lookup
5167 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5168
5169 if (myh != NULL)
5170 abort ();
5171
5172 bh = NULL;
5173 val = offset + 4;
5174 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5175 branch_sec, val, NULL, TRUE, FALSE, &bh);
5176
5177 myh = (struct elf_link_hash_entry *) bh;
5178 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5179 myh->forced_local = 1;
5180
5181 free (tmp_name);
5182
5183 /* Generate a mapping symbol for the veneer section, and explicitly add an
5184 entry for that symbol to the code/data map for the section. */
5185 if (hash_table->vfp11_erratum_glue_size == 0)
5186 {
5187 bh = NULL;
5188 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5189 ever requires this erratum fix. */
5190 _bfd_generic_link_add_one_symbol (link_info,
5191 hash_table->bfd_of_glue_owner, "$a",
5192 BSF_LOCAL, s, 0, NULL,
5193 TRUE, FALSE, &bh);
5194
5195 myh = (struct elf_link_hash_entry *) bh;
5196 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5197 myh->forced_local = 1;
5198
5199 /* The elf32_arm_init_maps function only cares about symbols from input
5200 BFDs. We must make a note of this generated mapping symbol
5201 ourselves so that code byteswapping works properly in
5202 elf32_arm_write_section. */
5203 elf32_arm_section_map_add (s, 'a', 0);
5204 }
5205
5206 s->size += VFP11_ERRATUM_VENEER_SIZE;
5207 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5208 hash_table->num_vfp11_fixes++;
5209
5210 /* The offset of the veneer. */
5211 return val;
5212 }
5213
5214 #define ARM_GLUE_SECTION_FLAGS \
5215 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5216 | SEC_READONLY | SEC_LINKER_CREATED)
5217
5218 /* Create a fake section for use by the ARM backend of the linker. */
5219
5220 static bfd_boolean
5221 arm_make_glue_section (bfd * abfd, const char * name)
5222 {
5223 asection * sec;
5224
5225 sec = bfd_get_section_by_name (abfd, name);
5226 if (sec != NULL)
5227 /* Already made. */
5228 return TRUE;
5229
5230 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5231
5232 if (sec == NULL
5233 || !bfd_set_section_alignment (abfd, sec, 2))
5234 return FALSE;
5235
5236 /* Set the gc mark to prevent the section from being removed by garbage
5237 collection, despite the fact that no relocs refer to this section. */
5238 sec->gc_mark = 1;
5239
5240 return TRUE;
5241 }
5242
5243 /* Add the glue sections to ABFD. This function is called from the
5244 linker scripts in ld/emultempl/{armelf}.em. */
5245
5246 bfd_boolean
5247 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5248 struct bfd_link_info *info)
5249 {
5250 /* If we are only performing a partial
5251 link do not bother adding the glue. */
5252 if (info->relocatable)
5253 return TRUE;
5254
5255 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5256 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5257 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5258 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5259 }
5260
5261 /* Select a BFD to be used to hold the sections used by the glue code.
5262 This function is called from the linker scripts in ld/emultempl/
5263 {armelf/pe}.em. */
5264
5265 bfd_boolean
5266 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5267 {
5268 struct elf32_arm_link_hash_table *globals;
5269
5270 /* If we are only performing a partial link
5271 do not bother getting a bfd to hold the glue. */
5272 if (info->relocatable)
5273 return TRUE;
5274
5275 /* Make sure we don't attach the glue sections to a dynamic object. */
5276 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5277
5278 globals = elf32_arm_hash_table (info);
5279
5280 BFD_ASSERT (globals != NULL);
5281
5282 if (globals->bfd_of_glue_owner != NULL)
5283 return TRUE;
5284
5285 /* Save the bfd for later use. */
5286 globals->bfd_of_glue_owner = abfd;
5287
5288 return TRUE;
5289 }
5290
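/* Record whether the BLX instruction is available, judging from the build
   attributes of the output: Tag_CPU_arch values above 2 (ARMv4T) denote
   ARMv5T and later, which provide BLX.  */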
5291 static void
5292 check_use_blx (struct elf32_arm_link_hash_table *globals)
5293 {
5294 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5295 Tag_CPU_arch) > 2)
5296 globals->use_blx = 1;
5297 }
5298
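/* Scan the relocations of ABFD before section sizes have been fixed,
   recording any ARM-to-Thumb glue and ARMv4 BX veneers that will be needed.
   Called from the linker before allocation.  */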
5299 bfd_boolean
5300 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5301 struct bfd_link_info *link_info)
5302 {
5303 Elf_Internal_Shdr *symtab_hdr;
5304 Elf_Internal_Rela *internal_relocs = NULL;
5305 Elf_Internal_Rela *irel, *irelend;
5306 bfd_byte *contents = NULL;
5307
5308 asection *sec;
5309 struct elf32_arm_link_hash_table *globals;
5310
5311 /* If we are only performing a partial link do not bother
5312 to construct any glue. */
5313 if (link_info->relocatable)
5314 return TRUE;
5315
5316 /* Here we have a bfd that is to be included on the link. We have a
5317 hook to do reloc rummaging, before section sizes are nailed down. */
5318 globals = elf32_arm_hash_table (link_info);
5319
5320 BFD_ASSERT (globals != NULL);
5321
5322 check_use_blx (globals);
5323
5324 if (globals->byteswap_code && !bfd_big_endian (abfd))
5325 {
5326 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5327 abfd);
5328 return FALSE;
5329 }
5330
5331 /* PR 5398: If we have not decided to include any loadable sections in
5332 the output then we will not have a glue owner bfd. This is OK, it
5333 just means that there is nothing else for us to do here. */
5334 if (globals->bfd_of_glue_owner == NULL)
5335 return TRUE;
5336
5337 /* Rummage around all the relocs and map the glue vectors. */
5338 sec = abfd->sections;
5339
5340 if (sec == NULL)
5341 return TRUE;
5342
5343 for (; sec != NULL; sec = sec->next)
5344 {
5345 if (sec->reloc_count == 0)
5346 continue;
5347
5348 if ((sec->flags & SEC_EXCLUDE) != 0)
5349 continue;
5350
5351 symtab_hdr = & elf_symtab_hdr (abfd);
5352
5353 /* Load the relocs. */
5354 internal_relocs
5355 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5356
5357 if (internal_relocs == NULL)
5358 goto error_return;
5359
5360 irelend = internal_relocs + sec->reloc_count;
5361 for (irel = internal_relocs; irel < irelend; irel++)
5362 {
5363 long r_type;
5364 unsigned long r_index;
5365
5366 struct elf_link_hash_entry *h;
5367
5368 r_type = ELF32_R_TYPE (irel->r_info);
5369 r_index = ELF32_R_SYM (irel->r_info);
5370
5371 /* These are the only relocation types we care about. */
5372 if ( r_type != R_ARM_PC24
5373 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5374 continue;
5375
5376 /* Get the section contents if we haven't done so already. */
5377 if (contents == NULL)
5378 {
5379 /* Get cached copy if it exists. */
5380 if (elf_section_data (sec)->this_hdr.contents != NULL)
5381 contents = elf_section_data (sec)->this_hdr.contents;
5382 else
5383 {
5384 /* Go get them off disk. */
5385 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5386 goto error_return;
5387 }
5388 }
5389
5390 if (r_type == R_ARM_V4BX)
5391 {
5392 int reg;
5393
5394 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5395 record_arm_bx_glue (link_info, reg);
5396 continue;
5397 }
5398
5399 /* If the relocation is not against a symbol it cannot concern us. */
5400 h = NULL;
5401
5402 /* We don't care about local symbols. */
5403 if (r_index < symtab_hdr->sh_info)
5404 continue;
5405
5406 /* This is an external symbol. */
5407 r_index -= symtab_hdr->sh_info;
5408 h = (struct elf_link_hash_entry *)
5409 elf_sym_hashes (abfd)[r_index];
5410
5411 /* If the relocation is against a static symbol it must be within
5412 the current section and so cannot be a cross ARM/Thumb relocation. */
5413 if (h == NULL)
5414 continue;
5415
5416 /* If the call will go through a PLT entry then we do not need
5417 glue. */
5418 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5419 continue;
5420
5421 switch (r_type)
5422 {
5423 case R_ARM_PC24:
5424 /* This one is a call from arm code. We need to look up
5425 the target of the call. If it is a thumb target, we
5426 insert glue. */
5427 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5428 record_arm_to_thumb_glue (link_info, h);
5429 break;
5430
5431 default:
5432 abort ();
5433 }
5434 }
5435
5436 if (contents != NULL
5437 && elf_section_data (sec)->this_hdr.contents != contents)
5438 free (contents);
5439 contents = NULL;
5440
5441 if (internal_relocs != NULL
5442 && elf_section_data (sec)->relocs != internal_relocs)
5443 free (internal_relocs);
5444 internal_relocs = NULL;
5445 }
5446
5447 return TRUE;
5448
5449 error_return:
5450 if (contents != NULL
5451 && elf_section_data (sec)->this_hdr.contents != contents)
5452 free (contents);
5453 if (internal_relocs != NULL
5454 && elf_section_data (sec)->relocs != internal_relocs)
5455 free (internal_relocs);
5456
5457 return FALSE;
5458 }
5459 #endif
5460
5461
5462 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5463
5464 void
5465 bfd_elf32_arm_init_maps (bfd *abfd)
5466 {
5467 Elf_Internal_Sym *isymbuf;
5468 Elf_Internal_Shdr *hdr;
5469 unsigned int i, localsyms;
5470
5471 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5472 if (! is_arm_elf (abfd))
5473 return;
5474
5475 if ((abfd->flags & DYNAMIC) != 0)
5476 return;
5477
5478 hdr = & elf_symtab_hdr (abfd);
5479 localsyms = hdr->sh_info;
5480
5481 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5482 should contain the number of local symbols, which should come before any
5483 global symbols. Mapping symbols are always local. */
5484 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5485 NULL);
5486
5487 /* No internal symbols read? Skip this BFD. */
5488 if (isymbuf == NULL)
5489 return;
5490
5491 for (i = 0; i < localsyms; i++)
5492 {
5493 Elf_Internal_Sym *isym = &isymbuf[i];
5494 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5495 const char *name;
5496
5497 if (sec != NULL
5498 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5499 {
5500 name = bfd_elf_string_from_elf_section (abfd,
5501 hdr->sh_link, isym->st_name);
5502
5503 if (bfd_is_arm_special_symbol_name (name,
5504 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5505 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5506 }
5507 }
5508 }
5509
5510
5511 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5512 say what they wanted. */
5513
5514 void
5515 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5516 {
5517 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5518 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5519
5520 if (globals->fix_cortex_a8 == -1)
5521 {
5522 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5523 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5524 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5525 || out_attr[Tag_CPU_arch_profile].i == 0))
5526 globals->fix_cortex_a8 = 1;
5527 else
5528 globals->fix_cortex_a8 = 0;
5529 }
5530 }
5531
5532
5533 void
5534 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5535 {
5536 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5537 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5538
5539 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5540 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5541 {
5542 switch (globals->vfp11_fix)
5543 {
5544 case BFD_ARM_VFP11_FIX_DEFAULT:
5545 case BFD_ARM_VFP11_FIX_NONE:
5546 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5547 break;
5548
5549 default:
5550 /* Give a warning, but do as the user requests anyway. */
5551 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5552 "workaround is not necessary for target architecture"), obfd);
5553 }
5554 }
5555 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5556 /* For earlier architectures, we might need the workaround, but do not
5557 enable it by default. If the user is running with broken hardware, they
5558 must enable the erratum fix explicitly. */
5559 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5560 }
5561
5562
5563 enum bfd_arm_vfp11_pipe
5564 {
5565 VFP11_FMAC,
5566 VFP11_LS,
5567 VFP11_DS,
5568 VFP11_BAD
5569 };
5570
5571 /* Return a VFP register number. This is encoded as RX:X for single-precision
5572 registers, or X:RX for double-precision registers, where RX is the group of
5573 four bits in the instruction encoding and X is the single extension bit.
5574 RX and X fields are specified using their lowest (starting) bit. The return
5575 value is:
5576
5577 0...31: single-precision registers s0...s31
5578 32...63: double-precision registers d0...d31.
5579
5580 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5581 encounter VFP3 instructions, so we allow the full range for DP registers. */
5582
5583 static unsigned int
5584 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5585 unsigned int x)
5586 {
5587 if (is_double)
5588 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5589 else
5590 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5591 }
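
/* As an illustration of the encoding above (values chosen arbitrarily, not
   taken from a real object file): a single-precision operand with RX = 0b0011
   and X = 1 decodes to (3 << 1) | 1 = 7, i.e. s7, while a double-precision
   operand with RX = 0b0101 and X = 0 decodes to 5 + 32 = 37, i.e. d5.  */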
5592
5593 /* Set bits in *WMASK according to a register number REG as encoded by
5594 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5595
5596 static void
5597 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5598 {
5599 if (reg < 32)
5600 *wmask |= 1 << reg;
5601 else if (reg < 48)
5602 *wmask |= 3 << ((reg - 32) * 2);
5603 }
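
/* Continuing the example above: s7 (register number 7) sets bit 7 of the
   write mask, while d5 (register number 37) sets the two bits covering its
   SP halves, 3 << ((37 - 32) * 2), i.e. bits 10 and 11.  */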
5604
5605 /* Return TRUE if WMASK overwrites anything in REGS. */
5606
5607 static bfd_boolean
5608 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5609 {
5610 int i;
5611
5612 for (i = 0; i < numregs; i++)
5613 {
5614 unsigned int reg = regs[i];
5615
5616 if (reg < 32 && (wmask & (1 << reg)) != 0)
5617 return TRUE;
5618
5619 reg -= 32;
5620
5621 if (reg >= 16)
5622 continue;
5623
5624 if ((wmask & (3 << (reg * 2))) != 0)
5625 return TRUE;
5626 }
5627
5628 return FALSE;
5629 }
5630
5631 /* In this function, we're interested in two things: finding input registers
5632 for VFP data-processing instructions, and finding the set of registers which
5633 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5634 hold the written set, so FLDM etc. are easy to deal with (we're only
5635 interested in 32 SP registers or 16 DP registers, due to the VFP version
5636 implemented by the chip in question). DP registers are marked by setting
5637 both SP registers in the write mask. */
5638
5639 static enum bfd_arm_vfp11_pipe
5640 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5641 int *numregs)
5642 {
5643 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5644 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5645
5646 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5647 {
5648 unsigned int pqrs;
5649 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5650 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5651
5652 pqrs = ((insn & 0x00800000) >> 20)
5653 | ((insn & 0x00300000) >> 19)
5654 | ((insn & 0x00000040) >> 6);
5655
5656 switch (pqrs)
5657 {
5658 case 0: /* fmac[sd]. */
5659 case 1: /* fnmac[sd]. */
5660 case 2: /* fmsc[sd]. */
5661 case 3: /* fnmsc[sd]. */
5662 pipe = VFP11_FMAC;
5663 bfd_arm_vfp11_write_mask (destmask, fd);
5664 regs[0] = fd;
5665 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5666 regs[2] = fm;
5667 *numregs = 3;
5668 break;
5669
5670 case 4: /* fmul[sd]. */
5671 case 5: /* fnmul[sd]. */
5672 case 6: /* fadd[sd]. */
5673 case 7: /* fsub[sd]. */
5674 pipe = VFP11_FMAC;
5675 goto vfp_binop;
5676
5677 case 8: /* fdiv[sd]. */
5678 pipe = VFP11_DS;
5679 vfp_binop:
5680 bfd_arm_vfp11_write_mask (destmask, fd);
5681 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5682 regs[1] = fm;
5683 *numregs = 2;
5684 break;
5685
5686 case 15: /* extended opcode. */
5687 {
5688 unsigned int extn = ((insn >> 15) & 0x1e)
5689 | ((insn >> 7) & 1);
5690
5691 switch (extn)
5692 {
5693 case 0: /* fcpy[sd]. */
5694 case 1: /* fabs[sd]. */
5695 case 2: /* fneg[sd]. */
5696 case 8: /* fcmp[sd]. */
5697 case 9: /* fcmpe[sd]. */
5698 case 10: /* fcmpz[sd]. */
5699 case 11: /* fcmpez[sd]. */
5700 case 16: /* fuito[sd]. */
5701 case 17: /* fsito[sd]. */
5702 case 24: /* ftoui[sd]. */
5703 case 25: /* ftouiz[sd]. */
5704 case 26: /* ftosi[sd]. */
5705 case 27: /* ftosiz[sd]. */
5706 /* These instructions will not bounce due to underflow. */
5707 *numregs = 0;
5708 pipe = VFP11_FMAC;
5709 break;
5710
5711 case 3: /* fsqrt[sd]. */
5712 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5713 registers to cause the erratum in previous instructions. */
5714 bfd_arm_vfp11_write_mask (destmask, fd);
5715 pipe = VFP11_DS;
5716 break;
5717
5718 case 15: /* fcvt{ds,sd}. */
5719 {
5720 int rnum = 0;
5721
5722 bfd_arm_vfp11_write_mask (destmask, fd);
5723
5724 /* Only FCVTSD can underflow. */
5725 if ((insn & 0x100) != 0)
5726 regs[rnum++] = fm;
5727
5728 *numregs = rnum;
5729
5730 pipe = VFP11_FMAC;
5731 }
5732 break;
5733
5734 default:
5735 return VFP11_BAD;
5736 }
5737 }
5738 break;
5739
5740 default:
5741 return VFP11_BAD;
5742 }
5743 }
5744 /* Two-register transfer. */
5745 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5746 {
5747 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5748
5749 if ((insn & 0x100000) == 0)
5750 {
5751 if (is_double)
5752 bfd_arm_vfp11_write_mask (destmask, fm);
5753 else
5754 {
5755 bfd_arm_vfp11_write_mask (destmask, fm);
5756 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5757 }
5758 }
5759
5760 pipe = VFP11_LS;
5761 }
5762 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5763 {
5764 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5765 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5766
5767 switch (puw)
5768 {
5769 case 0: /* Two-reg transfer. We should catch these above. */
5770 abort ();
5771
5772 case 2: /* fldm[sdx]. */
5773 case 3:
5774 case 5:
5775 {
5776 unsigned int i, offset = insn & 0xff;
5777
5778 if (is_double)
5779 offset >>= 1;
5780
5781 for (i = fd; i < fd + offset; i++)
5782 bfd_arm_vfp11_write_mask (destmask, i);
5783 }
5784 break;
5785
5786 case 4: /* fld[sd]. */
5787 case 6:
5788 bfd_arm_vfp11_write_mask (destmask, fd);
5789 break;
5790
5791 default:
5792 return VFP11_BAD;
5793 }
5794
5795 pipe = VFP11_LS;
5796 }
5797 /* Single-register transfer. Note L==0. */
5798 else if ((insn & 0x0f100e10) == 0x0e000a10)
5799 {
5800 unsigned int opcode = (insn >> 21) & 7;
5801 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5802
5803 switch (opcode)
5804 {
5805 case 0: /* fmsr/fmdlr. */
5806 case 1: /* fmdhr. */
5807 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5808 destination register. I don't know if this is exactly right,
5809 but it is the conservative choice. */
5810 bfd_arm_vfp11_write_mask (destmask, fn);
5811 break;
5812
5813 case 7: /* fmxr. */
5814 break;
5815 }
5816
5817 pipe = VFP11_LS;
5818 }
5819
5820 return pipe;
5821 }
5822
5823
5824 static int elf32_arm_compare_mapping (const void * a, const void * b);
5825
5826
5827 /* Look for potentially-troublesome code sequences which might trigger the
5828 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5829 (available from ARM) for details of the erratum. A short version is
5830 described in ld.texinfo. */
5831
5832 bfd_boolean
5833 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5834 {
5835 asection *sec;
5836 bfd_byte *contents = NULL;
5837 int state = 0;
5838 int regs[3], numregs = 0;
5839 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5840 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5841
5842 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5843 The states transition as follows:
5844
5845 0 -> 1 (vector) or 0 -> 2 (scalar)
5846 A VFP FMAC-pipeline instruction has been seen. Fill
5847 regs[0]..regs[numregs-1] with its input operands. Remember this
5848 instruction in 'first_fmac'.
5849
5850 1 -> 2
5851 Any instruction, except for a VFP instruction which overwrites
5852 regs[*].
5853
5854 1 -> 3 [ -> 0 ] or
5855 2 -> 3 [ -> 0 ]
5856 A VFP instruction has been seen which overwrites any of regs[*].
5857 We must make a veneer! Reset state to 0 before examining next
5858 instruction.
5859
5860 2 -> 0
5861 If we fail to match anything in state 2, reset to state 0 and reset
5862 the instruction pointer to the instruction after 'first_fmac'.
5863
5864 If the VFP11 vector mode is in use, there must be at least two unrelated
5865 instructions between anti-dependent VFP11 instructions to properly avoid
5866 triggering the erratum, hence the use of the extra state 1. */
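
/* A sketch of the scalar-mode path (0 -> 2 -> 3): an FMAC-pipeline
   instruction such as fmacs s0, s1, s2 records s0, s1 and s2 in regs[].
   If a later VFP instruction writes one of them (for example flds s1, [r0]),
   bfd_arm_vfp11_antidependency reports the overlap, state 3 is reached, and
   a veneer is recorded for the original FMAC instruction.  */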
5867
5868 /* If we are only performing a partial link do not bother
5869 to construct any glue. */
5870 if (link_info->relocatable)
5871 return TRUE;
5872
5873 /* Skip if this bfd does not correspond to an ELF image. */
5874 if (! is_arm_elf (abfd))
5875 return TRUE;
5876
5877 /* We should have chosen a fix type by the time we get here. */
5878 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5879
5880 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5881 return TRUE;
5882
5883 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5884 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5885 return TRUE;
5886
5887 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5888 {
5889 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5890 struct _arm_elf_section_data *sec_data;
5891
5892 /* If we don't have executable progbits, we're not interested in this
5893 section. Also skip if section is to be excluded. */
5894 if (elf_section_type (sec) != SHT_PROGBITS
5895 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5896 || (sec->flags & SEC_EXCLUDE) != 0
5897 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5898 || sec->output_section == bfd_abs_section_ptr
5899 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5900 continue;
5901
5902 sec_data = elf32_arm_section_data (sec);
5903
5904 if (sec_data->mapcount == 0)
5905 continue;
5906
5907 if (elf_section_data (sec)->this_hdr.contents != NULL)
5908 contents = elf_section_data (sec)->this_hdr.contents;
5909 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5910 goto error_return;
5911
5912 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5913 elf32_arm_compare_mapping);
5914
5915 for (span = 0; span < sec_data->mapcount; span++)
5916 {
5917 unsigned int span_start = sec_data->map[span].vma;
5918 unsigned int span_end = (span == sec_data->mapcount - 1)
5919 ? sec->size : sec_data->map[span + 1].vma;
5920 char span_type = sec_data->map[span].type;
5921
5922 /* FIXME: Only ARM mode is supported at present. We may need to
5923 support Thumb-2 mode also at some point. */
5924 if (span_type != 'a')
5925 continue;
5926
5927 for (i = span_start; i < span_end;)
5928 {
5929 unsigned int next_i = i + 4;
5930 unsigned int insn = bfd_big_endian (abfd)
5931 ? (contents[i] << 24)
5932 | (contents[i + 1] << 16)
5933 | (contents[i + 2] << 8)
5934 | contents[i + 3]
5935 : (contents[i + 3] << 24)
5936 | (contents[i + 2] << 16)
5937 | (contents[i + 1] << 8)
5938 | contents[i];
5939 unsigned int writemask = 0;
5940 enum bfd_arm_vfp11_pipe pipe;
5941
5942 switch (state)
5943 {
5944 case 0:
5945 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5946 &numregs);
5947 /* I'm assuming the VFP11 erratum can trigger with denorm
5948 operands on either the FMAC or the DS pipeline. This might
5949 lead to slightly overenthusiastic veneer insertion. */
5950 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5951 {
5952 state = use_vector ? 1 : 2;
5953 first_fmac = i;
5954 veneer_of_insn = insn;
5955 }
5956 break;
5957
5958 case 1:
5959 {
5960 int other_regs[3], other_numregs;
5961 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5962 other_regs,
5963 &other_numregs);
5964 if (pipe != VFP11_BAD
5965 && bfd_arm_vfp11_antidependency (writemask, regs,
5966 numregs))
5967 state = 3;
5968 else
5969 state = 2;
5970 }
5971 break;
5972
5973 case 2:
5974 {
5975 int other_regs[3], other_numregs;
5976 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5977 other_regs,
5978 &other_numregs);
5979 if (pipe != VFP11_BAD
5980 && bfd_arm_vfp11_antidependency (writemask, regs,
5981 numregs))
5982 state = 3;
5983 else
5984 {
5985 state = 0;
5986 next_i = first_fmac + 4;
5987 }
5988 }
5989 break;
5990
5991 case 3:
5992 abort (); /* Should be unreachable. */
5993 }
5994
5995 if (state == 3)
5996 {
5997 elf32_vfp11_erratum_list *newerr
5998 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5999 int errcount;
6000
6001 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6002
6003 newerr->u.b.vfp_insn = veneer_of_insn;
6004
6005 switch (span_type)
6006 {
6007 case 'a':
6008 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6009 break;
6010
6011 default:
6012 abort ();
6013 }
6014
6015 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6016 first_fmac);
6017
6018 newerr->vma = -1;
6019
6020 newerr->next = sec_data->erratumlist;
6021 sec_data->erratumlist = newerr;
6022
6023 state = 0;
6024 }
6025
6026 i = next_i;
6027 }
6028 }
6029
6030 if (contents != NULL
6031 && elf_section_data (sec)->this_hdr.contents != contents)
6032 free (contents);
6033 contents = NULL;
6034 }
6035
6036 return TRUE;
6037
6038 error_return:
6039 if (contents != NULL
6040 && elf_section_data (sec)->this_hdr.contents != contents)
6041 free (contents);
6042
6043 return FALSE;
6044 }
6045
6046 /* Find virtual-memory addresses of VFP11 erratum veneers and their return
6047 locations after sections have been laid out, using specially-named symbols. */
6048
6049 void
6050 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6051 struct bfd_link_info *link_info)
6052 {
6053 asection *sec;
6054 struct elf32_arm_link_hash_table *globals;
6055 char *tmp_name;
6056
6057 if (link_info->relocatable)
6058 return;
6059
6060 /* Skip if this bfd does not correspond to an ELF image. */
6061 if (! is_arm_elf (abfd))
6062 return;
6063
6064 globals = elf32_arm_hash_table (link_info);
6065
6066 tmp_name = bfd_malloc ((bfd_size_type) strlen
6067 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6068
6069 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6070 {
6071 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6072 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6073
6074 for (; errnode != NULL; errnode = errnode->next)
6075 {
6076 struct elf_link_hash_entry *myh;
6077 bfd_vma vma;
6078
6079 switch (errnode->type)
6080 {
6081 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6082 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6083 /* Find veneer symbol. */
6084 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6085 errnode->u.b.veneer->u.v.id);
6086
6087 myh = elf_link_hash_lookup
6088 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6089
6090 if (myh == NULL)
6091 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6092 "`%s'"), abfd, tmp_name);
6093
6094 vma = myh->root.u.def.section->output_section->vma
6095 + myh->root.u.def.section->output_offset
6096 + myh->root.u.def.value;
6097
6098 errnode->u.b.veneer->vma = vma;
6099 break;
6100
6101 case VFP11_ERRATUM_ARM_VENEER:
6102 case VFP11_ERRATUM_THUMB_VENEER:
6103 /* Find return location. */
6104 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6105 errnode->u.v.id);
6106
6107 myh = elf_link_hash_lookup
6108 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6109
6110 if (myh == NULL)
6111 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6112 "`%s'"), abfd, tmp_name);
6113
6114 vma = myh->root.u.def.section->output_section->vma
6115 + myh->root.u.def.section->output_offset
6116 + myh->root.u.def.value;
6117
6118 errnode->u.v.branch->vma = vma;
6119 break;
6120
6121 default:
6122 abort ();
6123 }
6124 }
6125 }
6126
6127 free (tmp_name);
6128 }
6129
6130
6131 /* Set target relocation values needed during linking. */
6132
6133 void
6134 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6135 struct bfd_link_info *link_info,
6136 int target1_is_rel,
6137 char * target2_type,
6138 int fix_v4bx,
6139 int use_blx,
6140 bfd_arm_vfp11_fix vfp11_fix,
6141 int no_enum_warn, int no_wchar_warn,
6142 int pic_veneer, int fix_cortex_a8)
6143 {
6144 struct elf32_arm_link_hash_table *globals;
6145
6146 globals = elf32_arm_hash_table (link_info);
6147
6148 globals->target1_is_rel = target1_is_rel;
6149 if (strcmp (target2_type, "rel") == 0)
6150 globals->target2_reloc = R_ARM_REL32;
6151 else if (strcmp (target2_type, "abs") == 0)
6152 globals->target2_reloc = R_ARM_ABS32;
6153 else if (strcmp (target2_type, "got-rel") == 0)
6154 globals->target2_reloc = R_ARM_GOT_PREL;
6155 else
6156 {
6157 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6158 target2_type);
6159 }
6160 globals->fix_v4bx = fix_v4bx;
6161 globals->use_blx |= use_blx;
6162 globals->vfp11_fix = vfp11_fix;
6163 globals->pic_veneer = pic_veneer;
6164 globals->fix_cortex_a8 = fix_cortex_a8;
6165
6166 BFD_ASSERT (is_arm_elf (output_bfd));
6167 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6168 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6169 }
6170
6171 /* Replace the target offset of a Thumb bl or b.w instruction. */
6172
6173 static void
6174 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6175 {
6176 bfd_vma upper;
6177 bfd_vma lower;
6178 int reloc_sign;
6179
6180 BFD_ASSERT ((offset & 1) == 0);
6181
6182 upper = bfd_get_16 (abfd, insn);
6183 lower = bfd_get_16 (abfd, insn + 2);
6184 reloc_sign = (offset < 0) ? 1 : 0;
6185 upper = (upper & ~(bfd_vma) 0x7ff)
6186 | ((offset >> 12) & 0x3ff)
6187 | (reloc_sign << 10);
6188 lower = (lower & ~(bfd_vma) 0x2fff)
6189 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6190 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6191 | ((offset >> 1) & 0x7ff);
6192 bfd_put_16 (abfd, upper, insn);
6193 bfd_put_16 (abfd, lower, insn + 2);
6194 }
6195
6196 /* Thumb code calling an ARM function. */
6197
6198 static int
6199 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6200 const char * name,
6201 bfd * input_bfd,
6202 bfd * output_bfd,
6203 asection * input_section,
6204 bfd_byte * hit_data,
6205 asection * sym_sec,
6206 bfd_vma offset,
6207 bfd_signed_vma addend,
6208 bfd_vma val,
6209 char **error_message)
6210 {
6211 asection * s = 0;
6212 bfd_vma my_offset;
6213 long int ret_offset;
6214 struct elf_link_hash_entry * myh;
6215 struct elf32_arm_link_hash_table * globals;
6216
6217 myh = find_thumb_glue (info, name, error_message);
6218 if (myh == NULL)
6219 return FALSE;
6220
6221 globals = elf32_arm_hash_table (info);
6222
6223 BFD_ASSERT (globals != NULL);
6224 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6225
6226 my_offset = myh->root.u.def.value;
6227
6228 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6229 THUMB2ARM_GLUE_SECTION_NAME);
6230
6231 BFD_ASSERT (s != NULL);
6232 BFD_ASSERT (s->contents != NULL);
6233 BFD_ASSERT (s->output_section != NULL);
6234
6235 if ((my_offset & 0x01) == 0x01)
6236 {
6237 if (sym_sec != NULL
6238 && sym_sec->owner != NULL
6239 && !INTERWORK_FLAG (sym_sec->owner))
6240 {
6241 (*_bfd_error_handler)
6242 (_("%B(%s): warning: interworking not enabled.\n"
6243 " first occurrence: %B: thumb call to arm"),
6244 sym_sec->owner, input_bfd, name);
6245
6246 return FALSE;
6247 }
6248
6249 --my_offset;
6250 myh->root.u.def.value = my_offset;
6251
6252 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6253 s->contents + my_offset);
6254
6255 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6256 s->contents + my_offset + 2);
6257
6258 ret_offset =
6259 /* Address of destination of the stub. */
6260 ((bfd_signed_vma) val)
6261 - ((bfd_signed_vma)
6262 /* Offset from the start of the current section
6263 to the start of the stubs. */
6264 (s->output_offset
6265 /* Offset of the start of this stub from the start of the stubs. */
6266 + my_offset
6267 /* Address of the start of the current section. */
6268 + s->output_section->vma)
6269 /* The branch instruction is 4 bytes into the stub. */
6270 + 4
6271 /* ARM branches work from the pc of the instruction + 8. */
6272 + 8);
6273
6274 put_arm_insn (globals, output_bfd,
6275 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6276 s->contents + my_offset + 4);
6277 }
6278
6279 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6280
6281 /* Now go back and fix up the original BL insn to point to here. */
6282 ret_offset =
6283 /* Address of where the stub is located. */
6284 (s->output_section->vma + s->output_offset + my_offset)
6285 /* Address of where the BL is located. */
6286 - (input_section->output_section->vma + input_section->output_offset
6287 + offset)
6288 /* Addend in the relocation. */
6289 - addend
6290 /* Biasing for PC-relative addressing. */
6291 - 8;
6292
6293 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6294
6295 return TRUE;
6296 }
6297
6298 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6299
6300 static struct elf_link_hash_entry *
6301 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6302 const char * name,
6303 bfd * input_bfd,
6304 bfd * output_bfd,
6305 asection * sym_sec,
6306 bfd_vma val,
6307 asection * s,
6308 char ** error_message)
6309 {
6310 bfd_vma my_offset;
6311 long int ret_offset;
6312 struct elf_link_hash_entry * myh;
6313 struct elf32_arm_link_hash_table * globals;
6314
6315 myh = find_arm_glue (info, name, error_message);
6316 if (myh == NULL)
6317 return NULL;
6318
6319 globals = elf32_arm_hash_table (info);
6320
6321 BFD_ASSERT (globals != NULL);
6322 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6323
6324 my_offset = myh->root.u.def.value;
6325
6326 if ((my_offset & 0x01) == 0x01)
6327 {
6328 if (sym_sec != NULL
6329 && sym_sec->owner != NULL
6330 && !INTERWORK_FLAG (sym_sec->owner))
6331 {
6332 (*_bfd_error_handler)
6333 (_("%B(%s): warning: interworking not enabled.\n"
6334 " first occurrence: %B: arm call to thumb"),
6335 sym_sec->owner, input_bfd, name);
6336 }
6337
6338 --my_offset;
6339 myh->root.u.def.value = my_offset;
6340
6341 if (info->shared || globals->root.is_relocatable_executable
6342 || globals->pic_veneer)
6343 {
6344 /* For relocatable objects we can't use absolute addresses,
6345 so construct the address from a relative offset. */
6346 /* TODO: If the offset is small it's probably worth
6347 constructing the address with adds. */
6348 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6349 s->contents + my_offset);
6350 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6351 s->contents + my_offset + 4);
6352 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6353 s->contents + my_offset + 8);
6354 /* Adjust the offset by 4 for the position of the add,
6355 and 8 for the pipeline offset. */
6356 ret_offset = (val - (s->output_offset
6357 + s->output_section->vma
6358 + my_offset + 12))
6359 | 1;
6360 bfd_put_32 (output_bfd, ret_offset,
6361 s->contents + my_offset + 12);
6362 }
6363 else if (globals->use_blx)
6364 {
6365 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6366 s->contents + my_offset);
6367
6368 /* It's a thumb address. Add the low order bit. */
6369 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6370 s->contents + my_offset + 4);
6371 }
6372 else
6373 {
6374 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6375 s->contents + my_offset);
6376
6377 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6378 s->contents + my_offset + 4);
6379
6380 /* It's a thumb address. Add the low order bit. */
6381 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6382 s->contents + my_offset + 8);
6383
6384 my_offset += 12;
6385 }
6386 }
6387
6388 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6389
6390 return myh;
6391 }
6392
6393 /* Arm code calling a Thumb function. */
6394
6395 static int
6396 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6397 const char * name,
6398 bfd * input_bfd,
6399 bfd * output_bfd,
6400 asection * input_section,
6401 bfd_byte * hit_data,
6402 asection * sym_sec,
6403 bfd_vma offset,
6404 bfd_signed_vma addend,
6405 bfd_vma val,
6406 char **error_message)
6407 {
6408 unsigned long int tmp;
6409 bfd_vma my_offset;
6410 asection * s;
6411 long int ret_offset;
6412 struct elf_link_hash_entry * myh;
6413 struct elf32_arm_link_hash_table * globals;
6414
6415 globals = elf32_arm_hash_table (info);
6416
6417 BFD_ASSERT (globals != NULL);
6418 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6419
6420 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6421 ARM2THUMB_GLUE_SECTION_NAME);
6422 BFD_ASSERT (s != NULL);
6423 BFD_ASSERT (s->contents != NULL);
6424 BFD_ASSERT (s->output_section != NULL);
6425
6426 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6427 sym_sec, val, s, error_message);
6428 if (!myh)
6429 return FALSE;
6430
6431 my_offset = myh->root.u.def.value;
6432 tmp = bfd_get_32 (input_bfd, hit_data);
6433 tmp = tmp & 0xFF000000;
6434
6435 /* Somehow these are both 4 too far, so subtract 8. */
6436 ret_offset = (s->output_offset
6437 + my_offset
6438 + s->output_section->vma
6439 - (input_section->output_offset
6440 + input_section->output_section->vma
6441 + offset + addend)
6442 - 8);
6443
6444 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6445
6446 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6447
6448 return TRUE;
6449 }
6450
6451 /* Populate Arm stub for an exported Thumb function. */
6452
6453 static bfd_boolean
6454 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6455 {
6456 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6457 asection * s;
6458 struct elf_link_hash_entry * myh;
6459 struct elf32_arm_link_hash_entry *eh;
6460 struct elf32_arm_link_hash_table * globals;
6461 asection *sec;
6462 bfd_vma val;
6463 char *error_message;
6464
6465 eh = elf32_arm_hash_entry (h);
6466 /* Allocate stubs for exported Thumb functions on v4t. */
6467 if (eh->export_glue == NULL)
6468 return TRUE;
6469
6470 globals = elf32_arm_hash_table (info);
6471
6472 BFD_ASSERT (globals != NULL);
6473 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6474
6475 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6476 ARM2THUMB_GLUE_SECTION_NAME);
6477 BFD_ASSERT (s != NULL);
6478 BFD_ASSERT (s->contents != NULL);
6479 BFD_ASSERT (s->output_section != NULL);
6480
6481 sec = eh->export_glue->root.u.def.section;
6482
6483 BFD_ASSERT (sec->output_section != NULL);
6484
6485 val = eh->export_glue->root.u.def.value + sec->output_offset
6486 + sec->output_section->vma;
6487
6488 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6489 h->root.u.def.section->owner,
6490 globals->obfd, sec, val, s,
6491 &error_message);
6492 BFD_ASSERT (myh);
6493 return TRUE;
6494 }
6495
6496 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6497
6498 static bfd_vma
6499 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6500 {
6501 bfd_byte *p;
6502 bfd_vma glue_addr;
6503 asection *s;
6504 struct elf32_arm_link_hash_table *globals;
6505
6506 globals = elf32_arm_hash_table (info);
6507
6508 BFD_ASSERT (globals != NULL);
6509 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6510
6511 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6512 ARM_BX_GLUE_SECTION_NAME);
6513 BFD_ASSERT (s != NULL);
6514 BFD_ASSERT (s->contents != NULL);
6515 BFD_ASSERT (s->output_section != NULL);
6516
6517 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6518
6519 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6520
6521 if ((globals->bx_glue_offset[reg] & 1) == 0)
6522 {
6523 p = s->contents + glue_addr;
6524 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6525 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6526 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6527 globals->bx_glue_offset[reg] |= 1;
6528 }
6529
6530 return glue_addr + s->output_section->vma + s->output_offset;
6531 }
6532
6533 /* Generate Arm stubs for exported Thumb symbols. */
6534 static void
6535 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6536 struct bfd_link_info *link_info)
6537 {
6538 struct elf32_arm_link_hash_table * globals;
6539
6540 if (link_info == NULL)
6541 /* Ignore this if we are not called by the ELF backend linker. */
6542 return;
6543
6544 globals = elf32_arm_hash_table (link_info);
6545 /* If blx is available then exported Thumb symbols are OK and there is
6546 nothing to do. */
6547 if (globals->use_blx)
6548 return;
6549
6550 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6551 link_info);
6552 }
6553
6554 /* Some relocations map to different relocations depending on the
6555 target. Return the real relocation. */
6556
6557 static int
6558 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6559 int r_type)
6560 {
6561 switch (r_type)
6562 {
6563 case R_ARM_TARGET1:
6564 if (globals->target1_is_rel)
6565 return R_ARM_REL32;
6566 else
6567 return R_ARM_ABS32;
6568
6569 case R_ARM_TARGET2:
6570 return globals->target2_reloc;
6571
6572 default:
6573 return r_type;
6574 }
6575 }
6576
6577 /* Return the base VMA address which should be subtracted from real addresses
6578 when resolving @dtpoff relocation.
6579 This is PT_TLS segment p_vaddr. */
6580
6581 static bfd_vma
6582 dtpoff_base (struct bfd_link_info *info)
6583 {
6584 /* If tls_sec is NULL, we should have signalled an error already. */
6585 if (elf_hash_table (info)->tls_sec == NULL)
6586 return 0;
6587 return elf_hash_table (info)->tls_sec->vma;
6588 }
6589
6590 /* Return the relocation value for @tpoff relocation
6591 if STT_TLS virtual address is ADDRESS. */
6592
6593 static bfd_vma
6594 tpoff (struct bfd_link_info *info, bfd_vma address)
6595 {
6596 struct elf_link_hash_table *htab = elf_hash_table (info);
6597 bfd_vma base;
6598
6599 /* If tls_sec is NULL, we should have signalled an error already. */
6600 if (htab->tls_sec == NULL)
6601 return 0;
6602 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6603 return address - htab->tls_sec->vma + base;
6604 }
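
/* For instance (illustrative numbers only): with a PT_TLS segment at vma
   0x11000, an alignment power of 3 and ARM's 8-byte TCB, base is
   align_power (8, 3) = 8, so an STT_TLS symbol at address 0x11010 yields a
   @tpoff value of 0x11010 - 0x11000 + 8 = 0x18.  */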
6605
6606 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6607 VALUE is the relocation value. */
6608
6609 static bfd_reloc_status_type
6610 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6611 {
6612 if (value > 0xfff)
6613 return bfd_reloc_overflow;
6614
6615 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6616 bfd_put_32 (abfd, value, data);
6617 return bfd_reloc_ok;
6618 }
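
/* For example, a computed value of 0x123 is merged into the low 12 bits of
   the instruction word, leaving the upper bits untouched; any value above
   0xfff is reported as an overflow.  */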
6619
6620 /* For a given value of n, calculate the value of G_n as required to
6621 deal with group relocations. We return it in the form of an
6622 encoded constant-and-rotation, together with the final residual. If n is
6623 specified as less than zero, then final_residual is filled with the
6624 input value and no further action is performed. */
6625
6626 static bfd_vma
6627 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6628 {
6629 int current_n;
6630 bfd_vma g_n;
6631 bfd_vma encoded_g_n = 0;
6632 bfd_vma residual = value; /* Also known as Y_n. */
6633
6634 for (current_n = 0; current_n <= n; current_n++)
6635 {
6636 int shift;
6637
6638 /* Calculate which part of the value to mask. */
6639 if (residual == 0)
6640 shift = 0;
6641 else
6642 {
6643 int msb;
6644
6645 /* Determine the most significant bit in the residual and
6646 align the resulting value to a 2-bit boundary. */
6647 for (msb = 30; msb >= 0; msb -= 2)
6648 if (residual & (3 << msb))
6649 break;
6650
6651 /* The desired shift is now (msb - 6), or zero, whichever
6652 is the greater. */
6653 shift = msb - 6;
6654 if (shift < 0)
6655 shift = 0;
6656 }
6657
6658 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6659 g_n = residual & (0xff << shift);
6660 encoded_g_n = (g_n >> shift)
6661 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6662
6663 /* Calculate the residual for the next time around. */
6664 residual &= ~g_n;
6665 }
6666
6667 *final_residual = residual;
6668
6669 return encoded_g_n;
6670 }
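
/* A hypothetical call, mirroring how the group-relocation handling below
   uses this helper:

     bfd_vma residual;
     bfd_vma g0 = calculate_group_reloc_mask (0x1234, 0, &residual);

   finds the top bit pair of 0x1234 at bit 12, so shift = 6 and
   G_0 = 0x1234 & (0xff << 6) = 0x1200, returned as imm8 0x48 with rotation
   field 13 (0x48 rotated right by 26 gives 0x1200), leaving residual 0x34.
   Calling it again with n = 1 returns 0x34 with a zero rotation and a final
   residual of 0.  */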
6671
6672 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6673 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6674
6675 static int
6676 identify_add_or_sub (bfd_vma insn)
6677 {
6678 int opcode = insn & 0x1e00000;
6679
6680 if (opcode == 1 << 23) /* ADD */
6681 return 1;
6682
6683 if (opcode == 1 << 22) /* SUB */
6684 return -1;
6685
6686 return 0;
6687 }
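
/* In other words, the opcode field in bits 21-24 is 0b0100 (1 << 23) for ADD
   and 0b0010 (1 << 22) for SUB; any other data-processing opcode yields 0.  */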
6688
6689 /* Perform a relocation as part of a final link. */
6690
6691 static bfd_reloc_status_type
6692 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6693 bfd * input_bfd,
6694 bfd * output_bfd,
6695 asection * input_section,
6696 bfd_byte * contents,
6697 Elf_Internal_Rela * rel,
6698 bfd_vma value,
6699 struct bfd_link_info * info,
6700 asection * sym_sec,
6701 const char * sym_name,
6702 int sym_flags,
6703 struct elf_link_hash_entry * h,
6704 bfd_boolean * unresolved_reloc_p,
6705 char ** error_message)
6706 {
6707 unsigned long r_type = howto->type;
6708 unsigned long r_symndx;
6709 bfd_byte * hit_data = contents + rel->r_offset;
6710 bfd * dynobj = NULL;
6711 Elf_Internal_Shdr * symtab_hdr;
6712 struct elf_link_hash_entry ** sym_hashes;
6713 bfd_vma * local_got_offsets;
6714 asection * sgot = NULL;
6715 asection * splt = NULL;
6716 asection * sreloc = NULL;
6717 bfd_vma addend;
6718 bfd_signed_vma signed_addend;
6719 struct elf32_arm_link_hash_table * globals;
6720
6721 globals = elf32_arm_hash_table (info);
6722
6723 BFD_ASSERT (is_arm_elf (input_bfd));
6724
6725 /* Some relocation types map to different relocations depending on the
6726 target. We pick the right one here. */
6727 r_type = arm_real_reloc_type (globals, r_type);
6728 if (r_type != howto->type)
6729 howto = elf32_arm_howto_from_type (r_type);
6730
6731 /* If the start address has been set, then set the EF_ARM_HASENTRY
6732 flag. Setting this more than once is redundant, but the cost is
6733 not too high, and it keeps the code simple.
6734
6735 The test is done here, rather than somewhere else, because the
6736 start address is only set just before the final link commences.
6737
6738 Note - if the user deliberately sets a start address of 0, the
6739 flag will not be set. */
6740 if (bfd_get_start_address (output_bfd) != 0)
6741 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6742
6743 dynobj = elf_hash_table (info)->dynobj;
6744 if (dynobj)
6745 {
6746 sgot = bfd_get_section_by_name (dynobj, ".got");
6747 splt = bfd_get_section_by_name (dynobj, ".plt");
6748 }
6749 symtab_hdr = & elf_symtab_hdr (input_bfd);
6750 sym_hashes = elf_sym_hashes (input_bfd);
6751 local_got_offsets = elf_local_got_offsets (input_bfd);
6752 r_symndx = ELF32_R_SYM (rel->r_info);
6753
6754 if (globals->use_rel)
6755 {
6756 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6757
6758 if (addend & ((howto->src_mask + 1) >> 1))
6759 {
6760 signed_addend = -1;
6761 signed_addend &= ~ howto->src_mask;
6762 signed_addend |= addend;
6763 }
6764 else
6765 signed_addend = addend;
6766 }
6767 else
6768 addend = signed_addend = rel->r_addend;
6769
6770 switch (r_type)
6771 {
6772 case R_ARM_NONE:
6773 /* We don't need to find a value for this symbol. It's just a
6774 marker. */
6775 *unresolved_reloc_p = FALSE;
6776 return bfd_reloc_ok;
6777
6778 case R_ARM_ABS12:
6779 if (!globals->vxworks_p)
6780 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6781
6782 case R_ARM_PC24:
6783 case R_ARM_ABS32:
6784 case R_ARM_ABS32_NOI:
6785 case R_ARM_REL32:
6786 case R_ARM_REL32_NOI:
6787 case R_ARM_CALL:
6788 case R_ARM_JUMP24:
6789 case R_ARM_XPC25:
6790 case R_ARM_PREL31:
6791 case R_ARM_PLT32:
6792 /* Handle relocations which should use the PLT entry. ABS32/REL32
6793 will use the symbol's value, which may point to a PLT entry, but we
6794 don't need to handle that here. If we created a PLT entry, all
6795 branches in this object should go to it, except if the PLT is too
6796 far away, in which case a long branch stub should be inserted. */
6797 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6798 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6799 && r_type != R_ARM_CALL
6800 && r_type != R_ARM_JUMP24
6801 && r_type != R_ARM_PLT32)
6802 && h != NULL
6803 && splt != NULL
6804 && h->plt.offset != (bfd_vma) -1)
6805 {
6806 /* If we've created a .plt section, and assigned a PLT entry to
6807 this function, it should not be known to bind locally. If
6808 it were, we would have cleared the PLT entry. */
6809 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6810
6811 value = (splt->output_section->vma
6812 + splt->output_offset
6813 + h->plt.offset);
6814 *unresolved_reloc_p = FALSE;
6815 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6816 contents, rel->r_offset, value,
6817 rel->r_addend);
6818 }
6819
6820 /* When generating a shared object or relocatable executable, these
6821 relocations are copied into the output file to be resolved at
6822 run time. */
6823 if ((info->shared || globals->root.is_relocatable_executable)
6824 && (input_section->flags & SEC_ALLOC)
6825 && !(elf32_arm_hash_table (info)->vxworks_p
6826 && strcmp (input_section->output_section->name,
6827 ".tls_vars") == 0)
6828 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6829 || !SYMBOL_CALLS_LOCAL (info, h))
6830 && (h == NULL
6831 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6832 || h->root.type != bfd_link_hash_undefweak)
6833 && r_type != R_ARM_PC24
6834 && r_type != R_ARM_CALL
6835 && r_type != R_ARM_JUMP24
6836 && r_type != R_ARM_PREL31
6837 && r_type != R_ARM_PLT32)
6838 {
6839 Elf_Internal_Rela outrel;
6840 bfd_byte *loc;
6841 bfd_boolean skip, relocate;
6842
6843 *unresolved_reloc_p = FALSE;
6844
6845 if (sreloc == NULL)
6846 {
6847 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6848 ! globals->use_rel);
6849
6850 if (sreloc == NULL)
6851 return bfd_reloc_notsupported;
6852 }
6853
6854 skip = FALSE;
6855 relocate = FALSE;
6856
6857 outrel.r_addend = addend;
6858 outrel.r_offset =
6859 _bfd_elf_section_offset (output_bfd, info, input_section,
6860 rel->r_offset);
6861 if (outrel.r_offset == (bfd_vma) -1)
6862 skip = TRUE;
6863 else if (outrel.r_offset == (bfd_vma) -2)
6864 skip = TRUE, relocate = TRUE;
6865 outrel.r_offset += (input_section->output_section->vma
6866 + input_section->output_offset);
6867
6868 if (skip)
6869 memset (&outrel, 0, sizeof outrel);
6870 else if (h != NULL
6871 && h->dynindx != -1
6872 && (!info->shared
6873 || !info->symbolic
6874 || !h->def_regular))
6875 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6876 else
6877 {
6878 int symbol;
6879
6880 /* This symbol is local, or marked to become local. */
6881 if (sym_flags == STT_ARM_TFUNC)
6882 value |= 1;
6883 if (globals->symbian_p)
6884 {
6885 asection *osec;
6886
6887 /* On Symbian OS, the data segment and text segment
6888 can be relocated independently. Therefore, we
6889 must indicate the segment to which this
6890 relocation is relative. The BPABI allows us to
6891 use any symbol in the right segment; we just use
6892 the section symbol as it is convenient. (We
6893 cannot use the symbol given by "h" directly as it
6894 will not appear in the dynamic symbol table.)
6895
6896 Note that the dynamic linker ignores the section
6897 symbol value, so we don't subtract osec->vma
6898 from the emitted reloc addend. */
6899 if (sym_sec)
6900 osec = sym_sec->output_section;
6901 else
6902 osec = input_section->output_section;
6903 symbol = elf_section_data (osec)->dynindx;
6904 if (symbol == 0)
6905 {
6906 struct elf_link_hash_table *htab = elf_hash_table (info);
6907
6908 if ((osec->flags & SEC_READONLY) == 0
6909 && htab->data_index_section != NULL)
6910 osec = htab->data_index_section;
6911 else
6912 osec = htab->text_index_section;
6913 symbol = elf_section_data (osec)->dynindx;
6914 }
6915 BFD_ASSERT (symbol != 0);
6916 }
6917 else
6918 /* On SVR4-ish systems, the dynamic loader cannot
6919 relocate the text and data segments independently,
6920 so the symbol does not matter. */
6921 symbol = 0;
6922 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6923 if (globals->use_rel)
6924 relocate = TRUE;
6925 else
6926 outrel.r_addend += value;
6927 }
6928
6929 loc = sreloc->contents;
6930 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6931 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6932
6933 /* If this reloc is against an external symbol, we do not want to
6934 fiddle with the addend. Otherwise, we need to include the symbol
6935 value so that it becomes an addend for the dynamic reloc. */
6936 if (! relocate)
6937 return bfd_reloc_ok;
6938
6939 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6940 contents, rel->r_offset, value,
6941 (bfd_vma) 0);
6942 }
6943 else switch (r_type)
6944 {
6945 case R_ARM_ABS12:
6946 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6947
6948 case R_ARM_XPC25: /* Arm BLX instruction. */
6949 case R_ARM_CALL:
6950 case R_ARM_JUMP24:
6951 case R_ARM_PC24: /* Arm B/BL instruction. */
6952 case R_ARM_PLT32:
6953 {
6954 bfd_vma from;
6955 bfd_signed_vma branch_offset;
6956 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6957
6958 if (r_type == R_ARM_XPC25)
6959 {
6960 /* Check for Arm calling Arm function. */
6961 /* FIXME: Should we translate the instruction into a BL
6962 instruction instead ? */
6963 if (sym_flags != STT_ARM_TFUNC)
6964 (*_bfd_error_handler)
6965 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6966 input_bfd,
6967 h ? h->root.root.string : "(local)");
6968 }
6969 else if (r_type == R_ARM_PC24)
6970 {
6971 /* Check for Arm calling Thumb function. */
6972 if (sym_flags == STT_ARM_TFUNC)
6973 {
6974 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6975 output_bfd, input_section,
6976 hit_data, sym_sec, rel->r_offset,
6977 signed_addend, value,
6978 error_message))
6979 return bfd_reloc_ok;
6980 else
6981 return bfd_reloc_dangerous;
6982 }
6983 }
6984
6985 /* Check if a stub has to be inserted because the
6986 destination is too far or we are changing mode. */
6987 if ( r_type == R_ARM_CALL
6988 || r_type == R_ARM_JUMP24
6989 || r_type == R_ARM_PLT32)
6990 {
6991 /* If the call goes through a PLT entry, make sure to
6992 check distance to the right destination address. */
6993 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6994 {
6995 value = (splt->output_section->vma
6996 + splt->output_offset
6997 + h->plt.offset);
6998 *unresolved_reloc_p = FALSE;
6999 }
7000
7001 from = (input_section->output_section->vma
7002 + input_section->output_offset
7003 + rel->r_offset);
7004 branch_offset = (bfd_signed_vma)(value - from);
7005
7006 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7007 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7008 || ((sym_flags == STT_ARM_TFUNC)
7009 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7010 || (r_type == R_ARM_JUMP24)
7011 || (r_type == R_ARM_PLT32) ))
7012 )
7013 {
7014 /* The target is out of reach, so redirect the
7015 branch to the local stub for this function. */
7016
7017 stub_entry = elf32_arm_get_stub_entry (input_section,
7018 sym_sec, h,
7019 rel, globals);
7020 if (stub_entry != NULL)
7021 value = (stub_entry->stub_offset
7022 + stub_entry->stub_sec->output_offset
7023 + stub_entry->stub_sec->output_section->vma);
7024 }
7025 }
7026
7027 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7028 where:
7029 S is the address of the symbol in the relocation.
7030 P is address of the instruction being relocated.
7031 A is the addend (extracted from the instruction) in bytes.
7032
7033 S is held in 'value'.
7034 P is the base address of the section containing the
7035 instruction plus the offset of the reloc into that
7036 section, ie:
7037 (input_section->output_section->vma +
7038 input_section->output_offset +
7039 rel->r_offset).
7040 A is the addend, converted into bytes, ie:
7041 (signed_addend * 4)
7042
7043 Note: None of these operations have knowledge of the pipeline
7044 size of the processor, thus it is up to the assembler to
7045 encode this information into the addend. */
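/* An illustrative computation (REL case, arbitrary addresses): with
   S = 0x8100, P = 0x8008 and an extracted addend field of -2 (i.e. -8
   bytes), the code below computes 0x8100 - 0x8008 - 8 = 0xf0, which after
   the right shift of 2 becomes the 24-bit branch field 0x00003c.  */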
7046 value -= (input_section->output_section->vma
7047 + input_section->output_offset);
7048 value -= rel->r_offset;
7049 if (globals->use_rel)
7050 value += (signed_addend << howto->size);
7051 else
7052 /* RELA addends do not have to be adjusted by howto->size. */
7053 value += signed_addend;
7054
7055 signed_addend = value;
7056 signed_addend >>= howto->rightshift;
7057
7058 /* A branch to an undefined weak symbol is turned into a jump to
7059 the next instruction unless a PLT entry will be created. */
7060 if (h && h->root.type == bfd_link_hash_undefweak
7061 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7062 {
7063 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7064 | 0x0affffff;
7065 }
7066 else
7067 {
7068 /* Perform a signed range check. */
7069 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7070 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7071 return bfd_reloc_overflow;
7072
7073 addend = (value & 2);
7074
7075 value = (signed_addend & howto->dst_mask)
7076 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7077
7078 if (r_type == R_ARM_CALL)
7079 {
7080 /* Set the H bit in the BLX instruction. */
7081 if (sym_flags == STT_ARM_TFUNC)
7082 {
7083 if (addend)
7084 value |= (1 << 24);
7085 else
7086 value &= ~(bfd_vma)(1 << 24);
7087 }
7088
7089 /* Select the correct instruction (BL or BLX). */
7090 /* Only if we are not handling a BL to a stub. In this
7091 case, mode switching is performed by the stub. */
7092 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7093 value |= (1 << 28);
7094 else
7095 {
7096 value &= ~(bfd_vma)(1 << 28);
7097 value |= (1 << 24);
7098 }
7099 }
7100 }
7101 }
7102 break;
7103
7104 case R_ARM_ABS32:
7105 value += addend;
7106 if (sym_flags == STT_ARM_TFUNC)
7107 value |= 1;
7108 break;
7109
7110 case R_ARM_ABS32_NOI:
7111 value += addend;
7112 break;
7113
7114 case R_ARM_REL32:
7115 value += addend;
7116 if (sym_flags == STT_ARM_TFUNC)
7117 value |= 1;
7118 value -= (input_section->output_section->vma
7119 + input_section->output_offset + rel->r_offset);
7120 break;
7121
7122 case R_ARM_REL32_NOI:
7123 value += addend;
7124 value -= (input_section->output_section->vma
7125 + input_section->output_offset + rel->r_offset);
7126 break;
7127
7128 case R_ARM_PREL31:
7129 value -= (input_section->output_section->vma
7130 + input_section->output_offset + rel->r_offset);
7131 value += signed_addend;
7132 if (! h || h->root.type != bfd_link_hash_undefweak)
7133 {
7134 /* Check for overflow. */
7135 if ((value ^ (value >> 1)) & (1 << 30))
7136 return bfd_reloc_overflow;
7137 }
7138 value &= 0x7fffffff;
7139 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7140 if (sym_flags == STT_ARM_TFUNC)
7141 value |= 1;
7142 break;
7143 }
7144
7145 bfd_put_32 (input_bfd, value, hit_data);
7146 return bfd_reloc_ok;
7147
7148 case R_ARM_ABS8:
7149 value += addend;
7150 if ((long) value > 0x7f || (long) value < -0x80)
7151 return bfd_reloc_overflow;
7152
7153 bfd_put_8 (input_bfd, value, hit_data);
7154 return bfd_reloc_ok;
7155
7156 case R_ARM_ABS16:
7157 value += addend;
7158
7159 if ((long) value > 0x7fff || (long) value < -0x8000)
7160 return bfd_reloc_overflow;
7161
7162 bfd_put_16 (input_bfd, value, hit_data);
7163 return bfd_reloc_ok;
7164
7165 case R_ARM_THM_ABS5:
7166 /* Support ldr and str instructions for the thumb. */
7167 if (globals->use_rel)
7168 {
7169 /* Need to refetch addend. */
7170 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7171 /* ??? Need to determine shift amount from operand size. */
7172 addend >>= howto->rightshift;
7173 }
7174 value += addend;
7175
7176 /* ??? Isn't value unsigned? */
7177 if ((long) value > 0x1f || (long) value < -0x10)
7178 return bfd_reloc_overflow;
7179
7180 /* ??? Value needs to be properly shifted into place first. */
7181 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7182 bfd_put_16 (input_bfd, value, hit_data);
7183 return bfd_reloc_ok;
7184
7185 case R_ARM_THM_ALU_PREL_11_0:
7186 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
7187 {
7188 bfd_vma insn;
7189 bfd_signed_vma relocation;
7190
7191 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7192 | bfd_get_16 (input_bfd, hit_data + 2);
7193
7194 if (globals->use_rel)
7195 {
7196 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7197 | ((insn & (1 << 26)) >> 15);
7198 if (insn & 0xf00000)
7199 signed_addend = -signed_addend;
7200 }
7201
7202 relocation = value + signed_addend;
7203 relocation -= (input_section->output_section->vma
7204 + input_section->output_offset
7205 + rel->r_offset);
7206
7207 value = abs (relocation);
7208
7209 if (value >= 0x1000)
7210 return bfd_reloc_overflow;
7211
7212 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7213 | ((value & 0x700) << 4)
7214 | ((value & 0x800) << 15);
7215 if (relocation < 0)
7216 insn |= 0xa00000;
7217
7218 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7219 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7220
7221 return bfd_reloc_ok;
7222 }
7223
7224 case R_ARM_THM_PC8:
7225 /* PR 10073: This reloc is not generated by the GNU toolchain,
7226 but it is supported for compatibility with third party libraries
7227 generated by other compilers, specifically the ARM/IAR compiler. */
7228 {
7229 bfd_vma insn;
7230 bfd_signed_vma relocation;
7231
7232 insn = bfd_get_16 (input_bfd, hit_data);
7233
7234 if (globals->use_rel)
7235 addend = (insn & 0x00ff) << 2;
7236
7237 relocation = value + addend;
7238 relocation -= (input_section->output_section->vma
7239 + input_section->output_offset
7240 + rel->r_offset);
7241
7242 value = abs (relocation);
7243
7244 /* We do not check for overflow of this reloc. Although strictly
7245 speaking this is incorrect, it appears to be necessary in order
7246 to work with IAR generated relocs. Since GCC and GAS do not
7247 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7248 a problem for them. */
7249 value &= 0x3fc;
7250
7251 insn = (insn & 0xff00) | (value >> 2);
7252
7253 bfd_put_16 (input_bfd, insn, hit_data);
7254
7255 return bfd_reloc_ok;
7256 }
7257
7258 case R_ARM_THM_PC12:
7259 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7260 {
7261 bfd_vma insn;
7262 bfd_signed_vma relocation;
7263
7264 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7265 | bfd_get_16 (input_bfd, hit_data + 2);
7266
7267 if (globals->use_rel)
7268 {
7269 signed_addend = insn & 0xfff;
7270 if (!(insn & (1 << 23)))
7271 signed_addend = -signed_addend;
7272 }
7273
7274 relocation = value + signed_addend;
7275 relocation -= (input_section->output_section->vma
7276 + input_section->output_offset
7277 + rel->r_offset);
7278
7279 value = abs (relocation);
7280
7281 if (value >= 0x1000)
7282 return bfd_reloc_overflow;
7283
7284 insn = (insn & 0xff7ff000) | value;
7285 if (relocation >= 0)
7286 insn |= (1 << 23);
7287
7288 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7289 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7290
7291 return bfd_reloc_ok;
7292 }
7293
7294 case R_ARM_THM_XPC22:
7295 case R_ARM_THM_CALL:
7296 case R_ARM_THM_JUMP24:
7297 /* Thumb BL (branch long instruction). */
7298 {
7299 bfd_vma relocation;
7300 bfd_vma reloc_sign;
7301 bfd_boolean overflow = FALSE;
7302 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7303 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7304 bfd_signed_vma reloc_signed_max;
7305 bfd_signed_vma reloc_signed_min;
7306 bfd_vma check;
7307 bfd_signed_vma signed_check;
7308 int bitsize;
7309 int thumb2 = using_thumb2 (globals);
7310
7311 /* A branch to an undefined weak symbol is turned into a jump to
7312 the next instruction unless a PLT entry will be created. */
7313 if (h && h->root.type == bfd_link_hash_undefweak
7314 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7315 {
7316 bfd_put_16 (input_bfd, 0xe000, hit_data);
7317 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7318 return bfd_reloc_ok;
7319 }
7320
7321 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7322 with Thumb-1) involving the J1 and J2 bits. */
7323 if (globals->use_rel)
7324 {
7325 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7326 bfd_vma upper = upper_insn & 0x3ff;
7327 bfd_vma lower = lower_insn & 0x7ff;
7328 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7329 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7330 bfd_vma i1 = j1 ^ s ? 0 : 1;
7331 bfd_vma i2 = j2 ^ s ? 0 : 1;
7332
7333 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7334 /* Sign extend. */
7335 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7336
7337 signed_addend = addend;
7338 }
7339
7340 if (r_type == R_ARM_THM_XPC22)
7341 {
7342 /* Check for Thumb to Thumb call. */
7343 /* FIXME: Should we translate the instruction into a BL
7344 instruction instead ? */
7345 if (sym_flags == STT_ARM_TFUNC)
7346 (*_bfd_error_handler)
7347 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7348 input_bfd,
7349 h ? h->root.root.string : "(local)");
7350 }
7351 else
7352 {
7353 /* If it is not a call to Thumb, assume call to Arm.
7354 If it is a call relative to a section name, then it is not a
7355 function call at all, but rather a long jump. Calls through
7356 the PLT do not require stubs. */
7357 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7358 && (h == NULL || splt == NULL
7359 || h->plt.offset == (bfd_vma) -1))
7360 {
7361 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7362 {
7363 /* Convert BL to BLX. */
7364 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7365 }
7366 else if (( r_type != R_ARM_THM_CALL)
7367 && (r_type != R_ARM_THM_JUMP24))
7368 {
7369 if (elf32_thumb_to_arm_stub
7370 (info, sym_name, input_bfd, output_bfd, input_section,
7371 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7372 error_message))
7373 return bfd_reloc_ok;
7374 else
7375 return bfd_reloc_dangerous;
7376 }
7377 }
7378 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7379 && r_type == R_ARM_THM_CALL)
7380 {
7381 /* Make sure this is a BL. */
7382 lower_insn |= 0x1800;
7383 }
7384 }
7385
7386 /* Handle calls via the PLT. */
7387 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7388 {
7389 value = (splt->output_section->vma
7390 + splt->output_offset
7391 + h->plt.offset);
7392 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7393 {
7394 /* If the Thumb BLX instruction is available, convert the
7395 BL to a BLX instruction to call the ARM-mode PLT entry. */
7396 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7397 }
7398 else
7399 /* Target the Thumb stub before the ARM PLT entry. */
7400 value -= PLT_THUMB_STUB_SIZE;
7401 *unresolved_reloc_p = FALSE;
7402 }
7403
7404 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7405 {
7406 /* Check if a stub has to be inserted because the destination
7407 is too far. */
7408 bfd_vma from;
7409 bfd_signed_vma branch_offset;
7410 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7411
7412 from = (input_section->output_section->vma
7413 + input_section->output_offset
7414 + rel->r_offset);
7415 branch_offset = (bfd_signed_vma)(value - from);
7416
7417 if ((!thumb2
7418 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7419 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7420 ||
7421 (thumb2
7422 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7423 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7424 || ((sym_flags != STT_ARM_TFUNC)
7425 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7426 || r_type == R_ARM_THM_JUMP24)))
7427 {
7428 /* The target is out of reach or we are changing modes, so
7429 redirect the branch to the local stub for this
7430 function. */
7431 stub_entry = elf32_arm_get_stub_entry (input_section,
7432 sym_sec, h,
7433 rel, globals);
7434 if (stub_entry != NULL)
7435 value = (stub_entry->stub_offset
7436 + stub_entry->stub_sec->output_offset
7437 + stub_entry->stub_sec->output_section->vma);
7438
7439 /* If this call becomes a call to Arm, force BLX. */
7440 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7441 {
7442 if ((stub_entry
7443 && !arm_stub_is_thumb (stub_entry->stub_type))
7444 || (sym_flags != STT_ARM_TFUNC))
7445 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7446 }
7447 }
7448 }
7449
7450 relocation = value + signed_addend;
7451
7452 relocation -= (input_section->output_section->vma
7453 + input_section->output_offset
7454 + rel->r_offset);
7455
7456 check = relocation >> howto->rightshift;
7457
7458 /* If this is a signed value, the rightshift just dropped
7459 leading 1 bits (assuming twos complement). */
7460 if ((bfd_signed_vma) relocation >= 0)
7461 signed_check = check;
7462 else
7463 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7464
7465 	/* Calculate the permissible maximum and minimum values for
7466 this relocation according to whether we're relocating for
7467 Thumb-2 or not. */
7468 bitsize = howto->bitsize;
7469 if (!thumb2)
7470 bitsize -= 2;
7471 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7472 reloc_signed_min = ~reloc_signed_max;
7473
7474 /* Assumes two's complement. */
7475 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7476 overflow = TRUE;
7477
7478 if ((lower_insn & 0x5000) == 0x4000)
7479 /* For a BLX instruction, make sure that the relocation is rounded up
7480 to a word boundary. This follows the semantics of the instruction
7481 which specifies that bit 1 of the target address will come from bit
7482 1 of the base address. */
7483 relocation = (relocation + 2) & ~ 3;
7484
7485 /* Put RELOCATION back into the insn. Assumes two's complement.
7486 We use the Thumb-2 encoding, which is safe even if dealing with
7487 a Thumb-1 instruction by virtue of our overflow check above. */
7488 reloc_sign = (signed_check < 0) ? 1 : 0;
7489 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7490 | ((relocation >> 12) & 0x3ff)
7491 | (reloc_sign << 10);
7492 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7493 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7494 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7495 | ((relocation >> 1) & 0x7ff);
7496
7497 /* Put the relocated value back in the object file: */
7498 bfd_put_16 (input_bfd, upper_insn, hit_data);
7499 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7500
7501 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7502 }
7503 break;
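    /* A worked sketch of the REL addend reconstruction above, written with
       the usual S/I1/I2 field names (illustrative only; the code uses the
       equivalent s/i1/i2 expressions):

           S  = (upper_insn >> 10) & 1;
           J1 = (lower_insn >> 13) & 1;
           J2 = (lower_insn >> 11) & 1;
           I1 = (~(J1 ^ S)) & 1;
           I2 = (~(J2 ^ S)) & 1;
           offset = (S << 24) | (I1 << 23) | (I2 << 22)
                    | ((upper_insn & 0x3ff) << 12)
                    | ((lower_insn & 0x7ff) << 1);
           offset = (offset ^ (1 << 24)) - (1 << 24);   sign extend from bit 24

       For example upper_insn = 0xf7ff, lower_insn = 0xfffe gives S = 1,
       I1 = I2 = 1, imm10 = 0x3ff, imm11 = 0x7fe, i.e. offset = -4; the
       (addend | ((s ? 0 : 1) << 24)) - (1 << 24) expression above computes
       the same value without ever materialising the S bit in the addend.  */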
7504
7505 case R_ARM_THM_JUMP19:
7506 /* Thumb32 conditional branch instruction. */
7507 {
7508 bfd_vma relocation;
7509 bfd_boolean overflow = FALSE;
7510 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7511 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7512 bfd_signed_vma reloc_signed_max = 0xffffe;
7513 bfd_signed_vma reloc_signed_min = -0x100000;
7514 bfd_signed_vma signed_check;
7515
7516 /* Need to refetch the addend, reconstruct the top three bits,
7517 and squish the two 11 bit pieces together. */
7518 if (globals->use_rel)
7519 {
7520 bfd_vma S = (upper_insn & 0x0400) >> 10;
7521 bfd_vma upper = (upper_insn & 0x003f);
7522 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7523 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7524 bfd_vma lower = (lower_insn & 0x07ff);
7525
7526 upper |= J1 << 6;
7527 upper |= J2 << 7;
7528 upper |= (!S) << 8;
7529 upper -= 0x0100; /* Sign extend. */
7530
7531 addend = (upper << 12) | (lower << 1);
7532 signed_addend = addend;
7533 }
7534
7535 /* Handle calls via the PLT. */
7536 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7537 {
7538 value = (splt->output_section->vma
7539 + splt->output_offset
7540 + h->plt.offset);
7541 /* Target the Thumb stub before the ARM PLT entry. */
7542 value -= PLT_THUMB_STUB_SIZE;
7543 *unresolved_reloc_p = FALSE;
7544 }
7545
7546 /* ??? Should handle interworking? GCC might someday try to
7547 use this for tail calls. */
7548
7549 relocation = value + signed_addend;
7550 relocation -= (input_section->output_section->vma
7551 + input_section->output_offset
7552 + rel->r_offset);
7553 signed_check = (bfd_signed_vma) relocation;
7554
7555 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7556 overflow = TRUE;
7557
7558 /* Put RELOCATION back into the insn. */
7559 {
7560 bfd_vma S = (relocation & 0x00100000) >> 20;
7561 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7562 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7563 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7564 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7565
7566 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7567 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7568 }
7569
7570 /* Put the relocated value back in the object file: */
7571 bfd_put_16 (input_bfd, upper_insn, hit_data);
7572 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7573
7574 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7575 }
7576
7577 case R_ARM_THM_JUMP11:
7578 case R_ARM_THM_JUMP8:
7579 case R_ARM_THM_JUMP6:
7580 /* Thumb B (branch) instruction. */
7581 {
7582 bfd_signed_vma relocation;
7583 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7584 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7585 bfd_signed_vma signed_check;
7586
7587 /* CZB cannot jump backward. */
7588 if (r_type == R_ARM_THM_JUMP6)
7589 reloc_signed_min = 0;
7590
7591 if (globals->use_rel)
7592 {
7593 /* Need to refetch addend. */
7594 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7595 if (addend & ((howto->src_mask + 1) >> 1))
7596 {
7597 signed_addend = -1;
7598 signed_addend &= ~ howto->src_mask;
7599 signed_addend |= addend;
7600 }
7601 else
7602 signed_addend = addend;
7603 /* The value in the insn has been right shifted. We need to
7604 undo this, so that we can perform the address calculation
7605 in terms of bytes. */
7606 signed_addend <<= howto->rightshift;
7607 }
7608 relocation = value + signed_addend;
7609
7610 relocation -= (input_section->output_section->vma
7611 + input_section->output_offset
7612 + rel->r_offset);
7613
7614 relocation >>= howto->rightshift;
7615 signed_check = relocation;
7616
7617 if (r_type == R_ARM_THM_JUMP6)
7618 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7619 else
7620 relocation &= howto->dst_mask;
7621 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7622
7623 bfd_put_16 (input_bfd, relocation, hit_data);
7624
7625 /* Assumes two's complement. */
7626 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7627 return bfd_reloc_overflow;
7628
7629 return bfd_reloc_ok;
7630 }
7631
7632 case R_ARM_ALU_PCREL7_0:
7633 case R_ARM_ALU_PCREL15_8:
7634 case R_ARM_ALU_PCREL23_15:
7635 {
7636 bfd_vma insn;
7637 bfd_vma relocation;
7638
7639 insn = bfd_get_32 (input_bfd, hit_data);
7640 if (globals->use_rel)
7641 {
7642 /* Extract the addend. */
7643 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7644 signed_addend = addend;
7645 }
7646 relocation = value + signed_addend;
7647
7648 relocation -= (input_section->output_section->vma
7649 + input_section->output_offset
7650 + rel->r_offset);
7651 insn = (insn & ~0xfff)
7652 | ((howto->bitpos << 7) & 0xf00)
7653 | ((relocation >> howto->bitpos) & 0xff);
7654 bfd_put_32 (input_bfd, insn, hit_data);
7655 }
7656 return bfd_reloc_ok;
7657
7658 case R_ARM_GNU_VTINHERIT:
7659 case R_ARM_GNU_VTENTRY:
7660 return bfd_reloc_ok;
7661
7662 case R_ARM_GOTOFF32:
7663 /* Relocation is relative to the start of the
7664 global offset table. */
7665
7666 BFD_ASSERT (sgot != NULL);
7667 if (sgot == NULL)
7668 return bfd_reloc_notsupported;
7669
7670 /* If we are addressing a Thumb function, we need to adjust the
7671 address by one, so that attempts to call the function pointer will
7672 correctly interpret it as Thumb code. */
7673 if (sym_flags == STT_ARM_TFUNC)
7674 value += 1;
7675
7676 /* Note that sgot->output_offset is not involved in this
7677 calculation. We always want the start of .got. If we
7678 define _GLOBAL_OFFSET_TABLE in a different way, as is
7679 permitted by the ABI, we might have to change this
7680 calculation. */
7681 value -= sgot->output_section->vma;
7682 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7683 contents, rel->r_offset, value,
7684 rel->r_addend);
7685
7686 case R_ARM_GOTPC:
7687 /* Use global offset table as symbol value. */
7688 BFD_ASSERT (sgot != NULL);
7689
7690 if (sgot == NULL)
7691 return bfd_reloc_notsupported;
7692
7693 *unresolved_reloc_p = FALSE;
7694 value = sgot->output_section->vma;
7695 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7696 contents, rel->r_offset, value,
7697 rel->r_addend);
7698
7699 case R_ARM_GOT32:
7700 case R_ARM_GOT_PREL:
7701 /* Relocation is to the entry for this symbol in the
7702 global offset table. */
7703 if (sgot == NULL)
7704 return bfd_reloc_notsupported;
7705
7706 if (h != NULL)
7707 {
7708 bfd_vma off;
7709 bfd_boolean dyn;
7710
7711 off = h->got.offset;
7712 BFD_ASSERT (off != (bfd_vma) -1);
7713 dyn = globals->root.dynamic_sections_created;
7714
7715 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7716 || (info->shared
7717 && SYMBOL_REFERENCES_LOCAL (info, h))
7718 || (ELF_ST_VISIBILITY (h->other)
7719 && h->root.type == bfd_link_hash_undefweak))
7720 {
7721 /* This is actually a static link, or it is a -Bsymbolic link
7722 and the symbol is defined locally. We must initialize this
7723 entry in the global offset table. Since the offset must
7724 always be a multiple of 4, we use the least significant bit
7725 to record whether we have initialized it already.
7726
7727 When doing a dynamic link, we create a .rel(a).got relocation
7728 entry to initialize the value. This is done in the
7729 finish_dynamic_symbol routine. */
7730 if ((off & 1) != 0)
7731 off &= ~1;
7732 else
7733 {
7734 /* If we are addressing a Thumb function, we need to
7735 adjust the address by one, so that attempts to
7736 call the function pointer will correctly
7737 interpret it as Thumb code. */
7738 if (sym_flags == STT_ARM_TFUNC)
7739 value |= 1;
7740
7741 bfd_put_32 (output_bfd, value, sgot->contents + off);
7742 h->got.offset |= 1;
7743 }
7744 }
7745 else
7746 *unresolved_reloc_p = FALSE;
7747
7748 value = sgot->output_offset + off;
7749 }
7750 else
7751 {
7752 bfd_vma off;
7753
7754 BFD_ASSERT (local_got_offsets != NULL &&
7755 local_got_offsets[r_symndx] != (bfd_vma) -1);
7756
7757 off = local_got_offsets[r_symndx];
7758
7759 /* The offset must always be a multiple of 4. We use the
7760 least significant bit to record whether we have already
7761 generated the necessary reloc. */
7762 if ((off & 1) != 0)
7763 off &= ~1;
7764 else
7765 {
7766 /* If we are addressing a Thumb function, we need to
7767 adjust the address by one, so that attempts to
7768 call the function pointer will correctly
7769 interpret it as Thumb code. */
7770 if (sym_flags == STT_ARM_TFUNC)
7771 value |= 1;
7772
7773 if (globals->use_rel)
7774 bfd_put_32 (output_bfd, value, sgot->contents + off);
7775
7776 if (info->shared)
7777 {
7778 asection * srelgot;
7779 Elf_Internal_Rela outrel;
7780 bfd_byte *loc;
7781
7782 srelgot = (bfd_get_section_by_name
7783 (dynobj, RELOC_SECTION (globals, ".got")));
7784 BFD_ASSERT (srelgot != NULL);
7785
7786 outrel.r_addend = addend + value;
7787 outrel.r_offset = (sgot->output_section->vma
7788 + sgot->output_offset
7789 + off);
7790 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7791 loc = srelgot->contents;
7792 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7793 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7794 }
7795
7796 local_got_offsets[r_symndx] |= 1;
7797 }
7798
7799 value = sgot->output_offset + off;
7800 }
7801 if (r_type != R_ARM_GOT32)
7802 value += sgot->output_section->vma;
7803
7804 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7805 contents, rel->r_offset, value,
7806 rel->r_addend);
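    /* The low-bit trick used in both the global and local GOT paths above:
       every GOT entry is 4-byte aligned, so bit 0 of the recorded offset is
       free to act as an "already initialised" flag.  For example an entry
       at offset 0x18 is stored back as 0x19 once it has been written, and
       (0x19 & ~1) recovers 0x18 for any later reloc against the same
       symbol.  (This is only a restatement of the comments above, not
       additional behaviour.)  */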
7807
7808 case R_ARM_TLS_LDO32:
7809 value = value - dtpoff_base (info);
7810
7811 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7812 contents, rel->r_offset, value,
7813 rel->r_addend);
7814
7815 case R_ARM_TLS_LDM32:
7816 {
7817 bfd_vma off;
7818
7819 if (globals->sgot == NULL)
7820 abort ();
7821
7822 off = globals->tls_ldm_got.offset;
7823
7824 if ((off & 1) != 0)
7825 off &= ~1;
7826 else
7827 {
7828 /* If we don't know the module number, create a relocation
7829 for it. */
7830 if (info->shared)
7831 {
7832 Elf_Internal_Rela outrel;
7833 bfd_byte *loc;
7834
7835 if (globals->srelgot == NULL)
7836 abort ();
7837
7838 outrel.r_addend = 0;
7839 outrel.r_offset = (globals->sgot->output_section->vma
7840 + globals->sgot->output_offset + off);
7841 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7842
7843 if (globals->use_rel)
7844 bfd_put_32 (output_bfd, outrel.r_addend,
7845 globals->sgot->contents + off);
7846
7847 loc = globals->srelgot->contents;
7848 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7849 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7850 }
7851 else
7852 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7853
7854 globals->tls_ldm_got.offset |= 1;
7855 }
7856
7857 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7858 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7859
7860 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7861 contents, rel->r_offset, value,
7862 rel->r_addend);
7863 }
7864
7865 case R_ARM_TLS_GD32:
7866 case R_ARM_TLS_IE32:
7867 {
7868 bfd_vma off;
7869 int indx;
7870 char tls_type;
7871
7872 if (globals->sgot == NULL)
7873 abort ();
7874
7875 indx = 0;
7876 if (h != NULL)
7877 {
7878 bfd_boolean dyn;
7879 dyn = globals->root.dynamic_sections_created;
7880 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7881 && (!info->shared
7882 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7883 {
7884 *unresolved_reloc_p = FALSE;
7885 indx = h->dynindx;
7886 }
7887 off = h->got.offset;
7888 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7889 }
7890 else
7891 {
7892 if (local_got_offsets == NULL)
7893 abort ();
7894 off = local_got_offsets[r_symndx];
7895 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7896 }
7897
7898 if (tls_type == GOT_UNKNOWN)
7899 abort ();
7900
7901 if ((off & 1) != 0)
7902 off &= ~1;
7903 else
7904 {
7905 bfd_boolean need_relocs = FALSE;
7906 Elf_Internal_Rela outrel;
7907 bfd_byte *loc = NULL;
7908 int cur_off = off;
7909
7910 /* The GOT entries have not been initialized yet. Do it
7911 now, and emit any relocations. If both an IE GOT and a
7912 GD GOT are necessary, we emit the GD first. */
7913
7914 if ((info->shared || indx != 0)
7915 && (h == NULL
7916 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7917 || h->root.type != bfd_link_hash_undefweak))
7918 {
7919 need_relocs = TRUE;
7920 if (globals->srelgot == NULL)
7921 abort ();
7922 loc = globals->srelgot->contents;
7923 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7924 }
7925
7926 if (tls_type & GOT_TLS_GD)
7927 {
7928 if (need_relocs)
7929 {
7930 outrel.r_addend = 0;
7931 outrel.r_offset = (globals->sgot->output_section->vma
7932 + globals->sgot->output_offset
7933 + cur_off);
7934 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7935
7936 if (globals->use_rel)
7937 bfd_put_32 (output_bfd, outrel.r_addend,
7938 globals->sgot->contents + cur_off);
7939
7940 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7941 globals->srelgot->reloc_count++;
7942 loc += RELOC_SIZE (globals);
7943
7944 if (indx == 0)
7945 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7946 globals->sgot->contents + cur_off + 4);
7947 else
7948 {
7949 outrel.r_addend = 0;
7950 outrel.r_info = ELF32_R_INFO (indx,
7951 R_ARM_TLS_DTPOFF32);
7952 outrel.r_offset += 4;
7953
7954 if (globals->use_rel)
7955 bfd_put_32 (output_bfd, outrel.r_addend,
7956 globals->sgot->contents + cur_off + 4);
7957
7958
7959 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7960 globals->srelgot->reloc_count++;
7961 loc += RELOC_SIZE (globals);
7962 }
7963 }
7964 else
7965 {
7966 /* If we are not emitting relocations for a
7967 general dynamic reference, then we must be in a
7968 static link or an executable link with the
7969 symbol binding locally. Mark it as belonging
7970 to module 1, the executable. */
7971 bfd_put_32 (output_bfd, 1,
7972 globals->sgot->contents + cur_off);
7973 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7974 globals->sgot->contents + cur_off + 4);
7975 }
7976
7977 cur_off += 8;
7978 }
7979
7980 if (tls_type & GOT_TLS_IE)
7981 {
7982 if (need_relocs)
7983 {
7984 if (indx == 0)
7985 outrel.r_addend = value - dtpoff_base (info);
7986 else
7987 outrel.r_addend = 0;
7988 outrel.r_offset = (globals->sgot->output_section->vma
7989 + globals->sgot->output_offset
7990 + cur_off);
7991 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7992
7993 if (globals->use_rel)
7994 bfd_put_32 (output_bfd, outrel.r_addend,
7995 globals->sgot->contents + cur_off);
7996
7997 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7998 globals->srelgot->reloc_count++;
7999 loc += RELOC_SIZE (globals);
8000 }
8001 else
8002 bfd_put_32 (output_bfd, tpoff (info, value),
8003 globals->sgot->contents + cur_off);
8004 cur_off += 4;
8005 }
8006
8007 if (h != NULL)
8008 h->got.offset |= 1;
8009 else
8010 local_got_offsets[r_symndx] |= 1;
8011 }
8012
8013 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8014 off += 8;
8015 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8016 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8017
8018 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8019 contents, rel->r_offset, value,
8020 rel->r_addend);
8021 }
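      /* GOT layout assumed by the code above, summarised for reference:

           GD entry: [ module id (R_ARM_TLS_DTPMOD32) ][ dtp offset ]  8 bytes
           IE entry: [ tp offset (R_ARM_TLS_TPOFF32)  ]                4 bytes

         When one symbol needs both, the GD pair is written first, which is
         why an IE-style reference (r_type != R_ARM_TLS_GD32) steps over it
         with the "off += 8" adjustment just above.  */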
8022
8023 case R_ARM_TLS_LE32:
8024 if (info->shared)
8025 {
8026 (*_bfd_error_handler)
8027 (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
8028 input_bfd, input_section,
8029 (long) rel->r_offset, howto->name);
8030 return bfd_reloc_notsupported;
8031 }
8032 else
8033 value = tpoff (info, value);
8034
8035 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8036 contents, rel->r_offset, value,
8037 rel->r_addend);
8038
8039 case R_ARM_V4BX:
8040 if (globals->fix_v4bx)
8041 {
8042 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8043
8044 /* Ensure that we have a BX instruction. */
8045 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8046
8047 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8048 {
8049 /* Branch to veneer. */
8050 bfd_vma glue_addr;
8051 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8052 glue_addr -= input_section->output_section->vma
8053 + input_section->output_offset
8054 + rel->r_offset + 8;
8055 insn = (insn & 0xf0000000) | 0x0a000000
8056 | ((glue_addr >> 2) & 0x00ffffff);
8057 }
8058 else
8059 {
8060 /* Preserve Rm (lowest four bits) and the condition code
8061 (highest four bits). Other bits encode MOV PC,Rm. */
8062 insn = (insn & 0xf000000f) | 0x01a0f000;
8063 }
8064
8065 bfd_put_32 (input_bfd, insn, hit_data);
8066 }
8067 return bfd_reloc_ok;
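      /* The R_ARM_V4BX rewrite above, worked through on one instruction
         (purely illustrative): "bx r3" is 0xe12fff13; keeping only the
         condition and Rm fields (insn & 0xf000000f) and ORing in 0x01a0f000
         yields 0xe1a0f003, i.e. "mov pc, r3", which an ARMv4 core can
         execute.  When fix_v4bx == 2 and Rm is not the PC (the
         (insn & 0xf) != 0xf test), the instruction is instead replaced by a
         branch to the interworking veneer returned by elf32_arm_bx_glue.  */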
8068
8069 case R_ARM_MOVW_ABS_NC:
8070 case R_ARM_MOVT_ABS:
8071 case R_ARM_MOVW_PREL_NC:
8072 case R_ARM_MOVT_PREL:
8073 /* Until we properly support segment-base-relative addressing,
8074 we assume the segment base to be zero, as for the group relocations.
8075 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8076 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8077 case R_ARM_MOVW_BREL_NC:
8078 case R_ARM_MOVW_BREL:
8079 case R_ARM_MOVT_BREL:
8080 {
8081 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8082
8083 if (globals->use_rel)
8084 {
8085 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8086 signed_addend = (addend ^ 0x8000) - 0x8000;
8087 }
8088
8089 value += signed_addend;
8090
8091 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8092 value -= (input_section->output_section->vma
8093 + input_section->output_offset + rel->r_offset);
8094
8095 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8096 return bfd_reloc_overflow;
8097
8098 if (sym_flags == STT_ARM_TFUNC)
8099 value |= 1;
8100
8101 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8102 || r_type == R_ARM_MOVT_BREL)
8103 value >>= 16;
8104
8105 insn &= 0xfff0f000;
8106 insn |= value & 0xfff;
8107 insn |= (value & 0xf000) << 4;
8108 bfd_put_32 (input_bfd, insn, hit_data);
8109 }
8110 return bfd_reloc_ok;
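    /* A worked example of the MOVW/MOVT immediate split performed above
       (derived from the masks used there): the 16-bit value is stored as
       imm4:imm12, e.g. for value 0x1234

           insn |= 0x1234 & 0xfff;            imm12 = 0x234, insn bits 11..0
           insn |= (0x1234 & 0xf000) << 4;    imm4  = 0x1,   insn bits 19..16

       For the MOVT flavours the same split is applied to the upper half of
       the address, after the "value >>= 16" above.  */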
8111
8112 case R_ARM_THM_MOVW_ABS_NC:
8113 case R_ARM_THM_MOVT_ABS:
8114 case R_ARM_THM_MOVW_PREL_NC:
8115 case R_ARM_THM_MOVT_PREL:
8116 /* Until we properly support segment-base-relative addressing,
8117 we assume the segment base to be zero, as for the above relocations.
8118 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8119 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8120 as R_ARM_THM_MOVT_ABS. */
8121 case R_ARM_THM_MOVW_BREL_NC:
8122 case R_ARM_THM_MOVW_BREL:
8123 case R_ARM_THM_MOVT_BREL:
8124 {
8125 bfd_vma insn;
8126
8127 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8128 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8129
8130 if (globals->use_rel)
8131 {
8132 addend = ((insn >> 4) & 0xf000)
8133 | ((insn >> 15) & 0x0800)
8134 | ((insn >> 4) & 0x0700)
8135 | (insn & 0x00ff);
8136 signed_addend = (addend ^ 0x8000) - 0x8000;
8137 }
8138
8139 value += signed_addend;
8140
8141 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8142 value -= (input_section->output_section->vma
8143 + input_section->output_offset + rel->r_offset);
8144
8145 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8146 return bfd_reloc_overflow;
8147
8148 if (sym_flags == STT_ARM_TFUNC)
8149 value |= 1;
8150
8151 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8152 || r_type == R_ARM_THM_MOVT_BREL)
8153 value >>= 16;
8154
8155 insn &= 0xfbf08f00;
8156 insn |= (value & 0xf000) << 4;
8157 insn |= (value & 0x0800) << 15;
8158 insn |= (value & 0x0700) << 4;
8159 insn |= (value & 0x00ff);
8160
8161 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8162 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8163 }
8164 return bfd_reloc_ok;
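    /* The Thumb-2 MOVW/MOVT split above, worked through for value 0x1234
       (the field names imm4:i:imm3:imm8 are only for illustration):

           (0x1234 & 0xf000) << 4    -> imm4 = 0x1,  insn bits 19..16
           (0x1234 & 0x0800) << 15   -> i    = 0,    insn bit  26
           (0x1234 & 0x0700) << 4    -> imm3 = 0x2,  insn bits 14..12
           (0x1234 & 0x00ff)         -> imm8 = 0x34, insn bits  7..0   */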
8165
8166 case R_ARM_ALU_PC_G0_NC:
8167 case R_ARM_ALU_PC_G1_NC:
8168 case R_ARM_ALU_PC_G0:
8169 case R_ARM_ALU_PC_G1:
8170 case R_ARM_ALU_PC_G2:
8171 case R_ARM_ALU_SB_G0_NC:
8172 case R_ARM_ALU_SB_G1_NC:
8173 case R_ARM_ALU_SB_G0:
8174 case R_ARM_ALU_SB_G1:
8175 case R_ARM_ALU_SB_G2:
8176 {
8177 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8178 bfd_vma pc = input_section->output_section->vma
8179 + input_section->output_offset + rel->r_offset;
8180 /* sb should be the origin of the *segment* containing the symbol.
8181 It is not clear how to obtain this OS-dependent value, so we
8182 make an arbitrary choice of zero. */
8183 bfd_vma sb = 0;
8184 bfd_vma residual;
8185 bfd_vma g_n;
8186 bfd_signed_vma signed_value;
8187 int group = 0;
8188
8189 /* Determine which group of bits to select. */
8190 switch (r_type)
8191 {
8192 case R_ARM_ALU_PC_G0_NC:
8193 case R_ARM_ALU_PC_G0:
8194 case R_ARM_ALU_SB_G0_NC:
8195 case R_ARM_ALU_SB_G0:
8196 group = 0;
8197 break;
8198
8199 case R_ARM_ALU_PC_G1_NC:
8200 case R_ARM_ALU_PC_G1:
8201 case R_ARM_ALU_SB_G1_NC:
8202 case R_ARM_ALU_SB_G1:
8203 group = 1;
8204 break;
8205
8206 case R_ARM_ALU_PC_G2:
8207 case R_ARM_ALU_SB_G2:
8208 group = 2;
8209 break;
8210
8211 default:
8212 abort ();
8213 }
8214
8215 /* If REL, extract the addend from the insn. If RELA, it will
8216 have already been fetched for us. */
8217 if (globals->use_rel)
8218 {
8219 int negative;
8220 bfd_vma constant = insn & 0xff;
8221 bfd_vma rotation = (insn & 0xf00) >> 8;
8222
8223 if (rotation == 0)
8224 signed_addend = constant;
8225 else
8226 {
8227 /* Compensate for the fact that in the instruction, the
8228 rotation is stored in multiples of 2 bits. */
8229 rotation *= 2;
8230
8231 /* Rotate "constant" right by "rotation" bits. */
8232 signed_addend = (constant >> rotation) |
8233 (constant << (8 * sizeof (bfd_vma) - rotation));
8234 }
8235
8236 /* Determine if the instruction is an ADD or a SUB.
8237 (For REL, this determines the sign of the addend.) */
8238 negative = identify_add_or_sub (insn);
8239 if (negative == 0)
8240 {
8241 (*_bfd_error_handler)
8242 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8243 input_bfd, input_section,
8244 (long) rel->r_offset, howto->name);
8245 return bfd_reloc_overflow;
8246 }
8247
8248 signed_addend *= negative;
8249 }
8250
8251 /* Compute the value (X) to go in the place. */
8252 if (r_type == R_ARM_ALU_PC_G0_NC
8253 || r_type == R_ARM_ALU_PC_G1_NC
8254 || r_type == R_ARM_ALU_PC_G0
8255 || r_type == R_ARM_ALU_PC_G1
8256 || r_type == R_ARM_ALU_PC_G2)
8257 /* PC relative. */
8258 signed_value = value - pc + signed_addend;
8259 else
8260 /* Section base relative. */
8261 signed_value = value - sb + signed_addend;
8262
8263 /* If the target symbol is a Thumb function, then set the
8264 Thumb bit in the address. */
8265 if (sym_flags == STT_ARM_TFUNC)
8266 signed_value |= 1;
8267
8268 /* Calculate the value of the relevant G_n, in encoded
8269 constant-with-rotation format. */
8270 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8271 &residual);
8272
8273 /* Check for overflow if required. */
8274 if ((r_type == R_ARM_ALU_PC_G0
8275 || r_type == R_ARM_ALU_PC_G1
8276 || r_type == R_ARM_ALU_PC_G2
8277 || r_type == R_ARM_ALU_SB_G0
8278 || r_type == R_ARM_ALU_SB_G1
8279 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8280 {
8281 (*_bfd_error_handler)
8282 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8283 input_bfd, input_section,
8284 (long) rel->r_offset, abs (signed_value), howto->name);
8285 return bfd_reloc_overflow;
8286 }
8287
8288 /* Mask out the value and the ADD/SUB part of the opcode; take care
8289 not to destroy the S bit. */
8290 insn &= 0xff1ff000;
8291
8292 /* Set the opcode according to whether the value to go in the
8293 place is negative. */
8294 if (signed_value < 0)
8295 insn |= 1 << 22;
8296 else
8297 insn |= 1 << 23;
8298
8299 /* Encode the offset. */
8300 insn |= g_n;
8301
8302 bfd_put_32 (input_bfd, insn, hit_data);
8303 }
8304 return bfd_reloc_ok;
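    /* A worked example of the group splitting done by the
       calculate_group_reloc_mask calls above, as we read that helper: the
       magnitude is consumed 8 bits at a time from the most significant end,
       each chunk aligned so that it fits an ARM "8-bit constant rotated by
       an even amount".  For |X| = 0x1234:

           G0 = 0x1200   (residual 0x34)    e.g.  add  rd, rb, #0x1200
           G1 = 0x34     (residual 0)             add  rd, rd, #0x34

       The *_G0/_G1/_G2 relocations without the _NC suffix require the
       residual left after their own group to be zero, which is exactly the
       overflow check above.  */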
8305
8306 case R_ARM_LDR_PC_G0:
8307 case R_ARM_LDR_PC_G1:
8308 case R_ARM_LDR_PC_G2:
8309 case R_ARM_LDR_SB_G0:
8310 case R_ARM_LDR_SB_G1:
8311 case R_ARM_LDR_SB_G2:
8312 {
8313 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8314 bfd_vma pc = input_section->output_section->vma
8315 + input_section->output_offset + rel->r_offset;
8316 bfd_vma sb = 0; /* See note above. */
8317 bfd_vma residual;
8318 bfd_signed_vma signed_value;
8319 int group = 0;
8320
8321 /* Determine which groups of bits to calculate. */
8322 switch (r_type)
8323 {
8324 case R_ARM_LDR_PC_G0:
8325 case R_ARM_LDR_SB_G0:
8326 group = 0;
8327 break;
8328
8329 case R_ARM_LDR_PC_G1:
8330 case R_ARM_LDR_SB_G1:
8331 group = 1;
8332 break;
8333
8334 case R_ARM_LDR_PC_G2:
8335 case R_ARM_LDR_SB_G2:
8336 group = 2;
8337 break;
8338
8339 default:
8340 abort ();
8341 }
8342
8343 /* If REL, extract the addend from the insn. If RELA, it will
8344 have already been fetched for us. */
8345 if (globals->use_rel)
8346 {
8347 int negative = (insn & (1 << 23)) ? 1 : -1;
8348 signed_addend = negative * (insn & 0xfff);
8349 }
8350
8351 /* Compute the value (X) to go in the place. */
8352 if (r_type == R_ARM_LDR_PC_G0
8353 || r_type == R_ARM_LDR_PC_G1
8354 || r_type == R_ARM_LDR_PC_G2)
8355 /* PC relative. */
8356 signed_value = value - pc + signed_addend;
8357 else
8358 /* Section base relative. */
8359 signed_value = value - sb + signed_addend;
8360
8361 /* Calculate the value of the relevant G_{n-1} to obtain
8362 the residual at that stage. */
8363 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8364
8365 /* Check for overflow. */
8366 if (residual >= 0x1000)
8367 {
8368 (*_bfd_error_handler)
8369 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8370 input_bfd, input_section,
8371 (long) rel->r_offset, abs (signed_value), howto->name);
8372 return bfd_reloc_overflow;
8373 }
8374
8375 /* Mask out the value and U bit. */
8376 insn &= 0xff7ff000;
8377
8378 /* Set the U bit if the value to go in the place is non-negative. */
8379 if (signed_value >= 0)
8380 insn |= 1 << 23;
8381
8382 /* Encode the offset. */
8383 insn |= residual;
8384
8385 bfd_put_32 (input_bfd, insn, hit_data);
8386 }
8387 return bfd_reloc_ok;
8388
8389 case R_ARM_LDRS_PC_G0:
8390 case R_ARM_LDRS_PC_G1:
8391 case R_ARM_LDRS_PC_G2:
8392 case R_ARM_LDRS_SB_G0:
8393 case R_ARM_LDRS_SB_G1:
8394 case R_ARM_LDRS_SB_G2:
8395 {
8396 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8397 bfd_vma pc = input_section->output_section->vma
8398 + input_section->output_offset + rel->r_offset;
8399 bfd_vma sb = 0; /* See note above. */
8400 bfd_vma residual;
8401 bfd_signed_vma signed_value;
8402 int group = 0;
8403
8404 /* Determine which groups of bits to calculate. */
8405 switch (r_type)
8406 {
8407 case R_ARM_LDRS_PC_G0:
8408 case R_ARM_LDRS_SB_G0:
8409 group = 0;
8410 break;
8411
8412 case R_ARM_LDRS_PC_G1:
8413 case R_ARM_LDRS_SB_G1:
8414 group = 1;
8415 break;
8416
8417 case R_ARM_LDRS_PC_G2:
8418 case R_ARM_LDRS_SB_G2:
8419 group = 2;
8420 break;
8421
8422 default:
8423 abort ();
8424 }
8425
8426 /* If REL, extract the addend from the insn. If RELA, it will
8427 have already been fetched for us. */
8428 if (globals->use_rel)
8429 {
8430 int negative = (insn & (1 << 23)) ? 1 : -1;
8431 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8432 }
8433
8434 /* Compute the value (X) to go in the place. */
8435 if (r_type == R_ARM_LDRS_PC_G0
8436 || r_type == R_ARM_LDRS_PC_G1
8437 || r_type == R_ARM_LDRS_PC_G2)
8438 /* PC relative. */
8439 signed_value = value - pc + signed_addend;
8440 else
8441 /* Section base relative. */
8442 signed_value = value - sb + signed_addend;
8443
8444 /* Calculate the value of the relevant G_{n-1} to obtain
8445 the residual at that stage. */
8446 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8447
8448 /* Check for overflow. */
8449 if (residual >= 0x100)
8450 {
8451 (*_bfd_error_handler)
8452 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8453 input_bfd, input_section,
8454 (long) rel->r_offset, abs (signed_value), howto->name);
8455 return bfd_reloc_overflow;
8456 }
8457
8458 /* Mask out the value and U bit. */
8459 insn &= 0xff7ff0f0;
8460
8461 /* Set the U bit if the value to go in the place is non-negative. */
8462 if (signed_value >= 0)
8463 insn |= 1 << 23;
8464
8465 /* Encode the offset. */
8466 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8467
8468 bfd_put_32 (input_bfd, insn, hit_data);
8469 }
8470 return bfd_reloc_ok;
8471
8472 case R_ARM_LDC_PC_G0:
8473 case R_ARM_LDC_PC_G1:
8474 case R_ARM_LDC_PC_G2:
8475 case R_ARM_LDC_SB_G0:
8476 case R_ARM_LDC_SB_G1:
8477 case R_ARM_LDC_SB_G2:
8478 {
8479 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8480 bfd_vma pc = input_section->output_section->vma
8481 + input_section->output_offset + rel->r_offset;
8482 bfd_vma sb = 0; /* See note above. */
8483 bfd_vma residual;
8484 bfd_signed_vma signed_value;
8485 int group = 0;
8486
8487 /* Determine which groups of bits to calculate. */
8488 switch (r_type)
8489 {
8490 case R_ARM_LDC_PC_G0:
8491 case R_ARM_LDC_SB_G0:
8492 group = 0;
8493 break;
8494
8495 case R_ARM_LDC_PC_G1:
8496 case R_ARM_LDC_SB_G1:
8497 group = 1;
8498 break;
8499
8500 case R_ARM_LDC_PC_G2:
8501 case R_ARM_LDC_SB_G2:
8502 group = 2;
8503 break;
8504
8505 default:
8506 abort ();
8507 }
8508
8509 /* If REL, extract the addend from the insn. If RELA, it will
8510 have already been fetched for us. */
8511 if (globals->use_rel)
8512 {
8513 int negative = (insn & (1 << 23)) ? 1 : -1;
8514 signed_addend = negative * ((insn & 0xff) << 2);
8515 }
8516
8517 /* Compute the value (X) to go in the place. */
8518 if (r_type == R_ARM_LDC_PC_G0
8519 || r_type == R_ARM_LDC_PC_G1
8520 || r_type == R_ARM_LDC_PC_G2)
8521 /* PC relative. */
8522 signed_value = value - pc + signed_addend;
8523 else
8524 /* Section base relative. */
8525 signed_value = value - sb + signed_addend;
8526
8527 /* Calculate the value of the relevant G_{n-1} to obtain
8528 the residual at that stage. */
8529 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8530
8531 /* Check for overflow. (The absolute value to go in the place must be
8532 divisible by four and, after having been divided by four, must
8533 fit in eight bits.) */
8534 if ((residual & 0x3) != 0 || residual >= 0x400)
8535 {
8536 (*_bfd_error_handler)
8537 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8538 input_bfd, input_section,
8539 (long) rel->r_offset, abs (signed_value), howto->name);
8540 return bfd_reloc_overflow;
8541 }
8542
8543 /* Mask out the value and U bit. */
8544 insn &= 0xff7fff00;
8545
8546 /* Set the U bit if the value to go in the place is non-negative. */
8547 if (signed_value >= 0)
8548 insn |= 1 << 23;
8549
8550 /* Encode the offset. */
8551 insn |= residual >> 2;
8552
8553 bfd_put_32 (input_bfd, insn, hit_data);
8554 }
8555 return bfd_reloc_ok;
8556
8557 default:
8558 return bfd_reloc_notsupported;
8559 }
8560 }
8561
8562 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8563 static void
8564 arm_add_to_rel (bfd * abfd,
8565 bfd_byte * address,
8566 reloc_howto_type * howto,
8567 bfd_signed_vma increment)
8568 {
8569 bfd_signed_vma addend;
8570
8571 if (howto->type == R_ARM_THM_CALL
8572 || howto->type == R_ARM_THM_JUMP24)
8573 {
8574 int upper_insn, lower_insn;
8575 int upper, lower;
8576
8577 upper_insn = bfd_get_16 (abfd, address);
8578 lower_insn = bfd_get_16 (abfd, address + 2);
8579 upper = upper_insn & 0x7ff;
8580 lower = lower_insn & 0x7ff;
8581
8582 addend = (upper << 12) | (lower << 1);
8583 addend += increment;
8584 addend >>= 1;
8585
8586 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8587 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8588
8589 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8590 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8591 }
8592 else
8593 {
8594 bfd_vma contents;
8595
8596 contents = bfd_get_32 (abfd, address);
8597
8598 /* Get the (signed) value from the instruction. */
8599 addend = contents & howto->src_mask;
8600 if (addend & ((howto->src_mask + 1) >> 1))
8601 {
8602 bfd_signed_vma mask;
8603
8604 mask = -1;
8605 mask &= ~ howto->src_mask;
8606 addend |= mask;
8607 }
8608
8609 /* Add in the increment (which is a byte value). */
8610 switch (howto->type)
8611 {
8612 default:
8613 addend += increment;
8614 break;
8615
8616 case R_ARM_PC24:
8617 case R_ARM_PLT32:
8618 case R_ARM_CALL:
8619 case R_ARM_JUMP24:
8620 addend <<= howto->size;
8621 addend += increment;
8622
8623 /* Should we check for overflow here ? */
8624
8625 /* Drop any undesired bits. */
8626 addend >>= howto->rightshift;
8627 break;
8628 }
8629
8630 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8631
8632 bfd_put_32 (abfd, contents, address);
8633 }
8634 }
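/* A worked example of the in-place addend update above for an ARM branch
   reloc such as R_ARM_JUMP24 (REL links keep the addend inside the
   instruction, so a relocatable link has to patch it there): a stored
   24-bit field of 10 is scaled up to a byte addend of 40, an increment of 8
   (the section's new output offset, say) makes it 48, and shifting right
   again stores 12 back into the field.  The R_ARM_THM_CALL/THM_JUMP24 path
   does the same arithmetic on the split imm10/imm11 halves of the
   instruction pair.  */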
8635
8636 #define IS_ARM_TLS_RELOC(R_TYPE) \
8637 ((R_TYPE) == R_ARM_TLS_GD32 \
8638 || (R_TYPE) == R_ARM_TLS_LDO32 \
8639 || (R_TYPE) == R_ARM_TLS_LDM32 \
8640 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8641 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8642 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8643 || (R_TYPE) == R_ARM_TLS_LE32 \
8644 || (R_TYPE) == R_ARM_TLS_IE32)
8645
8646 /* Relocate an ARM ELF section. */
8647
8648 static bfd_boolean
8649 elf32_arm_relocate_section (bfd * output_bfd,
8650 struct bfd_link_info * info,
8651 bfd * input_bfd,
8652 asection * input_section,
8653 bfd_byte * contents,
8654 Elf_Internal_Rela * relocs,
8655 Elf_Internal_Sym * local_syms,
8656 asection ** local_sections)
8657 {
8658 Elf_Internal_Shdr *symtab_hdr;
8659 struct elf_link_hash_entry **sym_hashes;
8660 Elf_Internal_Rela *rel;
8661 Elf_Internal_Rela *relend;
8662 const char *name;
8663 struct elf32_arm_link_hash_table * globals;
8664
8665 globals = elf32_arm_hash_table (info);
8666
8667 symtab_hdr = & elf_symtab_hdr (input_bfd);
8668 sym_hashes = elf_sym_hashes (input_bfd);
8669
8670 rel = relocs;
8671 relend = relocs + input_section->reloc_count;
8672 for (; rel < relend; rel++)
8673 {
8674 int r_type;
8675 reloc_howto_type * howto;
8676 unsigned long r_symndx;
8677 Elf_Internal_Sym * sym;
8678 asection * sec;
8679 struct elf_link_hash_entry * h;
8680 bfd_vma relocation;
8681 bfd_reloc_status_type r;
8682 arelent bfd_reloc;
8683 char sym_type;
8684 bfd_boolean unresolved_reloc = FALSE;
8685 char *error_message = NULL;
8686
8687 r_symndx = ELF32_R_SYM (rel->r_info);
8688 r_type = ELF32_R_TYPE (rel->r_info);
8689 r_type = arm_real_reloc_type (globals, r_type);
8690
8691 if ( r_type == R_ARM_GNU_VTENTRY
8692 || r_type == R_ARM_GNU_VTINHERIT)
8693 continue;
8694
8695 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8696 howto = bfd_reloc.howto;
8697
8698 h = NULL;
8699 sym = NULL;
8700 sec = NULL;
8701
8702 if (r_symndx < symtab_hdr->sh_info)
8703 {
8704 sym = local_syms + r_symndx;
8705 sym_type = ELF32_ST_TYPE (sym->st_info);
8706 sec = local_sections[r_symndx];
8707 if (globals->use_rel)
8708 {
8709 relocation = (sec->output_section->vma
8710 + sec->output_offset
8711 + sym->st_value);
8712 if (!info->relocatable
8713 && (sec->flags & SEC_MERGE)
8714 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8715 {
8716 asection *msec;
8717 bfd_vma addend, value;
8718
8719 switch (r_type)
8720 {
8721 case R_ARM_MOVW_ABS_NC:
8722 case R_ARM_MOVT_ABS:
8723 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8724 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8725 addend = (addend ^ 0x8000) - 0x8000;
8726 break;
8727
8728 case R_ARM_THM_MOVW_ABS_NC:
8729 case R_ARM_THM_MOVT_ABS:
8730 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8731 << 16;
8732 value |= bfd_get_16 (input_bfd,
8733 contents + rel->r_offset + 2);
8734 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8735 | ((value & 0x04000000) >> 15);
8736 addend = (addend ^ 0x8000) - 0x8000;
8737 break;
8738
8739 default:
8740 if (howto->rightshift
8741 || (howto->src_mask & (howto->src_mask + 1)))
8742 {
8743 (*_bfd_error_handler)
8744 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8745 input_bfd, input_section,
8746 (long) rel->r_offset, howto->name);
8747 return FALSE;
8748 }
8749
8750 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8751
8752 /* Get the (signed) value from the instruction. */
8753 addend = value & howto->src_mask;
8754 if (addend & ((howto->src_mask + 1) >> 1))
8755 {
8756 bfd_signed_vma mask;
8757
8758 mask = -1;
8759 mask &= ~ howto->src_mask;
8760 addend |= mask;
8761 }
8762 break;
8763 }
8764
8765 msec = sec;
8766 addend =
8767 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8768 - relocation;
8769 addend += msec->output_section->vma + msec->output_offset;
8770
8771 /* Cases here must match those in the preceding
8772 switch statement. */
8773 switch (r_type)
8774 {
8775 case R_ARM_MOVW_ABS_NC:
8776 case R_ARM_MOVT_ABS:
8777 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8778 | (addend & 0xfff);
8779 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8780 break;
8781
8782 case R_ARM_THM_MOVW_ABS_NC:
8783 case R_ARM_THM_MOVT_ABS:
8784 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8785 | (addend & 0xff) | ((addend & 0x0800) << 15);
8786 bfd_put_16 (input_bfd, value >> 16,
8787 contents + rel->r_offset);
8788 bfd_put_16 (input_bfd, value,
8789 contents + rel->r_offset + 2);
8790 break;
8791
8792 default:
8793 value = (value & ~ howto->dst_mask)
8794 | (addend & howto->dst_mask);
8795 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8796 break;
8797 }
8798 }
8799 }
8800 else
8801 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8802 }
8803 else
8804 {
8805 bfd_boolean warned;
8806
8807 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8808 r_symndx, symtab_hdr, sym_hashes,
8809 h, sec, relocation,
8810 unresolved_reloc, warned);
8811
8812 sym_type = h->type;
8813 }
8814
8815 if (sec != NULL && elf_discarded_section (sec))
8816 {
8817 /* For relocs against symbols from removed linkonce sections,
8818 or sections discarded by a linker script, we just want the
8819 section contents zeroed. Avoid any special processing. */
8820 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8821 rel->r_info = 0;
8822 rel->r_addend = 0;
8823 continue;
8824 }
8825
8826 if (info->relocatable)
8827 {
8828 /* This is a relocatable link. We don't have to change
8829 anything, unless the reloc is against a section symbol,
8830 in which case we have to adjust according to where the
8831 section symbol winds up in the output section. */
8832 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8833 {
8834 if (globals->use_rel)
8835 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8836 howto, (bfd_signed_vma) sec->output_offset);
8837 else
8838 rel->r_addend += sec->output_offset;
8839 }
8840 continue;
8841 }
8842
8843 if (h != NULL)
8844 name = h->root.root.string;
8845 else
8846 {
8847 name = (bfd_elf_string_from_elf_section
8848 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8849 if (name == NULL || *name == '\0')
8850 name = bfd_section_name (input_bfd, sec);
8851 }
8852
8853 if (r_symndx != 0
8854 && r_type != R_ARM_NONE
8855 && (h == NULL
8856 || h->root.type == bfd_link_hash_defined
8857 || h->root.type == bfd_link_hash_defweak)
8858 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8859 {
8860 (*_bfd_error_handler)
8861 ((sym_type == STT_TLS
8862 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8863 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8864 input_bfd,
8865 input_section,
8866 (long) rel->r_offset,
8867 howto->name,
8868 name);
8869 }
8870
8871 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8872 input_section, contents, rel,
8873 relocation, info, sec, name,
8874 (h ? ELF_ST_TYPE (h->type) :
8875 ELF_ST_TYPE (sym->st_info)), h,
8876 &unresolved_reloc, &error_message);
8877
8878 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8879 because such sections are not SEC_ALLOC and thus ld.so will
8880 not process them. */
8881 if (unresolved_reloc
8882 && !((input_section->flags & SEC_DEBUGGING) != 0
8883 && h->def_dynamic))
8884 {
8885 (*_bfd_error_handler)
8886 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8887 input_bfd,
8888 input_section,
8889 (long) rel->r_offset,
8890 howto->name,
8891 h->root.root.string);
8892 return FALSE;
8893 }
8894
8895 if (r != bfd_reloc_ok)
8896 {
8897 switch (r)
8898 {
8899 case bfd_reloc_overflow:
8900 /* If the overflowing reloc was to an undefined symbol,
8901 we have already printed one error message and there
8902 is no point complaining again. */
8903 if ((! h ||
8904 h->root.type != bfd_link_hash_undefined)
8905 && (!((*info->callbacks->reloc_overflow)
8906 (info, (h ? &h->root : NULL), name, howto->name,
8907 (bfd_vma) 0, input_bfd, input_section,
8908 rel->r_offset))))
8909 return FALSE;
8910 break;
8911
8912 case bfd_reloc_undefined:
8913 if (!((*info->callbacks->undefined_symbol)
8914 (info, name, input_bfd, input_section,
8915 rel->r_offset, TRUE)))
8916 return FALSE;
8917 break;
8918
8919 case bfd_reloc_outofrange:
8920 error_message = _("out of range");
8921 goto common_error;
8922
8923 case bfd_reloc_notsupported:
8924 error_message = _("unsupported relocation");
8925 goto common_error;
8926
8927 case bfd_reloc_dangerous:
8928 /* error_message should already be set. */
8929 goto common_error;
8930
8931 default:
8932 error_message = _("unknown error");
8933 /* Fall through. */
8934
8935 common_error:
8936 BFD_ASSERT (error_message != NULL);
8937 if (!((*info->callbacks->reloc_dangerous)
8938 (info, error_message, input_bfd, input_section,
8939 rel->r_offset)))
8940 return FALSE;
8941 break;
8942 }
8943 }
8944 }
8945
8946 return TRUE;
8947 }
8948
8949 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8950 adds the edit to the start of the list. (The list must be built in order of
8951 ascending INDEX: the function's callers are primarily responsible for
8952 maintaining that condition). */
8953
8954 static void
8955 add_unwind_table_edit (arm_unwind_table_edit **head,
8956 arm_unwind_table_edit **tail,
8957 arm_unwind_edit_type type,
8958 asection *linked_section,
8959 unsigned int index)
8960 {
8961 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8962
8963 new_edit->type = type;
8964 new_edit->linked_section = linked_section;
8965 new_edit->index = index;
8966
8967 if (index > 0)
8968 {
8969 new_edit->next = NULL;
8970
8971 if (*tail)
8972 (*tail)->next = new_edit;
8973
8974 (*tail) = new_edit;
8975
8976 if (!*head)
8977 (*head) = new_edit;
8978 }
8979 else
8980 {
8981 new_edit->next = *head;
8982
8983 if (!*tail)
8984 *tail = new_edit;
8985
8986 *head = new_edit;
8987 }
8988 }
8989
8990 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8991
8992 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8993 static void
8994 adjust_exidx_size(asection *exidx_sec, int adjust)
8995 {
8996 asection *out_sec;
8997
8998 if (!exidx_sec->rawsize)
8999 exidx_sec->rawsize = exidx_sec->size;
9000
9001 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9002 out_sec = exidx_sec->output_section;
9003 /* Adjust size of output section. */
9004 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9005 }
9006
9007 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9008 static void
9009 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
9010 {
9011 struct _arm_elf_section_data *exidx_arm_data;
9012
9013 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9014 add_unwind_table_edit (
9015 &exidx_arm_data->u.exidx.unwind_edit_list,
9016 &exidx_arm_data->u.exidx.unwind_edit_tail,
9017 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9018
9019 adjust_exidx_size(exidx_sec, 8);
9020 }
9021
9022 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9023 made to those tables, such that:
9024
9025 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9026 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9027 codes which have been inlined into the index).
9028
9029 The edits are applied when the tables are written
9030 (in elf32_arm_write_section).
9031 */
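/* For example (a hedged illustration of the rules above): if three
   consecutive functions each contribute an EXIDX_CANTUNWIND entry (second
   word == 1), only the first is kept and the other two become
   DELETE_EXIDX_ENTRY edits; likewise two adjacent entries whose inlined
   unwind words are bit-for-bit identical are collapsed into one.  A text
   section with no .ARM.exidx data at all causes an
   INSERT_EXIDX_CANTUNWIND_AT_END edit to be queued on the previous
   section's table, so the address range remains covered.  */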
9032
9033 bfd_boolean
9034 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9035 unsigned int num_text_sections,
9036 struct bfd_link_info *info)
9037 {
9038 bfd *inp;
9039 unsigned int last_second_word = 0, i;
9040 asection *last_exidx_sec = NULL;
9041 asection *last_text_sec = NULL;
9042 int last_unwind_type = -1;
9043
9044 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9045 text sections. */
9046 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9047 {
9048 asection *sec;
9049
9050 for (sec = inp->sections; sec != NULL; sec = sec->next)
9051 {
9052 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9053 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9054
9055 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9056 continue;
9057
9058 if (elf_sec->linked_to)
9059 {
9060 Elf_Internal_Shdr *linked_hdr
9061 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9062 struct _arm_elf_section_data *linked_sec_arm_data
9063 = get_arm_elf_section_data (linked_hdr->bfd_section);
9064
9065 if (linked_sec_arm_data == NULL)
9066 continue;
9067
9068 /* Link this .ARM.exidx section back from the text section it
9069 describes. */
9070 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9071 }
9072 }
9073 }
9074
9075 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9076 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9077 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9078 */
9079
9080 for (i = 0; i < num_text_sections; i++)
9081 {
9082 asection *sec = text_section_order[i];
9083 asection *exidx_sec;
9084 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9085 struct _arm_elf_section_data *exidx_arm_data;
9086 bfd_byte *contents = NULL;
9087 int deleted_exidx_bytes = 0;
9088 bfd_vma j;
9089 arm_unwind_table_edit *unwind_edit_head = NULL;
9090 arm_unwind_table_edit *unwind_edit_tail = NULL;
9091 Elf_Internal_Shdr *hdr;
9092 bfd *ibfd;
9093
9094 if (arm_data == NULL)
9095 continue;
9096
9097 exidx_sec = arm_data->u.text.arm_exidx_sec;
9098 if (exidx_sec == NULL)
9099 {
9100 /* Section has no unwind data. */
9101 if (last_unwind_type == 0 || !last_exidx_sec)
9102 continue;
9103
9104 /* Ignore zero sized sections. */
9105 if (sec->size == 0)
9106 continue;
9107
9108 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9109 last_unwind_type = 0;
9110 continue;
9111 }
9112
9113 /* Skip /DISCARD/ sections. */
9114 if (bfd_is_abs_section (exidx_sec->output_section))
9115 continue;
9116
9117 hdr = &elf_section_data (exidx_sec)->this_hdr;
9118 if (hdr->sh_type != SHT_ARM_EXIDX)
9119 continue;
9120
9121 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9122 if (exidx_arm_data == NULL)
9123 continue;
9124
9125 ibfd = exidx_sec->owner;
9126
9127 if (hdr->contents != NULL)
9128 contents = hdr->contents;
9129 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9130 /* An error? */
9131 continue;
9132
9133 for (j = 0; j < hdr->sh_size; j += 8)
9134 {
9135 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9136 int unwind_type;
9137 int elide = 0;
9138
9139 /* An EXIDX_CANTUNWIND entry. */
9140 if (second_word == 1)
9141 {
9142 if (last_unwind_type == 0)
9143 elide = 1;
9144 unwind_type = 0;
9145 }
9146 /* Inlined unwinding data. Merge if equal to previous. */
9147 else if ((second_word & 0x80000000) != 0)
9148 {
9149 if (last_second_word == second_word && last_unwind_type == 1)
9150 elide = 1;
9151 unwind_type = 1;
9152 last_second_word = second_word;
9153 }
9154 /* Normal table entry. In theory we could merge these too,
9155 but duplicate entries are likely to be much less common. */
9156 else
9157 unwind_type = 2;
9158
9159 if (elide)
9160 {
9161 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9162 DELETE_EXIDX_ENTRY, NULL, j / 8);
9163
9164 deleted_exidx_bytes += 8;
9165 }
9166
9167 last_unwind_type = unwind_type;
9168 }
9169
9170 /* Free contents if we allocated it ourselves. */
9171 if (contents != hdr->contents)
9172 free (contents);
9173
9174 /* Record edits to be applied later (in elf32_arm_write_section). */
9175 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9176 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9177
9178 if (deleted_exidx_bytes > 0)
9179 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9180
9181 last_exidx_sec = exidx_sec;
9182 last_text_sec = sec;
9183 }
9184
9185 /* Add terminating CANTUNWIND entry. */
9186 if (last_exidx_sec && last_unwind_type != 0)
9187 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9188
9189 return TRUE;
9190 }
9191
9192 static bfd_boolean
9193 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9194 bfd *ibfd, const char *name)
9195 {
9196 asection *sec, *osec;
9197
9198 sec = bfd_get_section_by_name (ibfd, name);
9199 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9200 return TRUE;
9201
9202 osec = sec->output_section;
9203 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9204 return TRUE;
9205
9206 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9207 sec->output_offset, sec->size))
9208 return FALSE;
9209
9210 return TRUE;
9211 }
9212
9213 static bfd_boolean
9214 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9215 {
9216 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9217
9218 /* Invoke the regular ELF backend linker to do all the work. */
9219 if (!bfd_elf_final_link (abfd, info))
9220 return FALSE;
9221
9222 /* Write out any glue sections now that we have created all the
9223 stubs. */
9224 if (globals->bfd_of_glue_owner != NULL)
9225 {
9226 if (! elf32_arm_output_glue_section (info, abfd,
9227 globals->bfd_of_glue_owner,
9228 ARM2THUMB_GLUE_SECTION_NAME))
9229 return FALSE;
9230
9231 if (! elf32_arm_output_glue_section (info, abfd,
9232 globals->bfd_of_glue_owner,
9233 THUMB2ARM_GLUE_SECTION_NAME))
9234 return FALSE;
9235
9236 if (! elf32_arm_output_glue_section (info, abfd,
9237 globals->bfd_of_glue_owner,
9238 VFP11_ERRATUM_VENEER_SECTION_NAME))
9239 return FALSE;
9240
9241 if (! elf32_arm_output_glue_section (info, abfd,
9242 globals->bfd_of_glue_owner,
9243 ARM_BX_GLUE_SECTION_NAME))
9244 return FALSE;
9245 }
9246
9247 return TRUE;
9248 }
9249
9250 /* Set the right machine number. */
9251
9252 static bfd_boolean
9253 elf32_arm_object_p (bfd *abfd)
9254 {
9255 unsigned int mach;
9256
9257 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9258
9259 if (mach != bfd_mach_arm_unknown)
9260 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9261
9262 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9263 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9264
9265 else
9266 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9267
9268 return TRUE;
9269 }
9270
9271 /* Function to keep ARM specific flags in the ELF header. */
9272
9273 static bfd_boolean
9274 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9275 {
9276 if (elf_flags_init (abfd)
9277 && elf_elfheader (abfd)->e_flags != flags)
9278 {
9279 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9280 {
9281 if (flags & EF_ARM_INTERWORK)
9282 (*_bfd_error_handler)
9283 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9284 abfd);
9285 else
9286 _bfd_error_handler
9287 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9288 abfd);
9289 }
9290 }
9291 else
9292 {
9293 elf_elfheader (abfd)->e_flags = flags;
9294 elf_flags_init (abfd) = TRUE;
9295 }
9296
9297 return TRUE;
9298 }
9299
9300 /* Copy backend specific data from one object module to another. */
9301
9302 static bfd_boolean
9303 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9304 {
9305 flagword in_flags;
9306 flagword out_flags;
9307
9308 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9309 return TRUE;
9310
9311 in_flags = elf_elfheader (ibfd)->e_flags;
9312 out_flags = elf_elfheader (obfd)->e_flags;
9313
9314 if (elf_flags_init (obfd)
9315 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9316 && in_flags != out_flags)
9317 {
9318 /* Cannot mix APCS26 and APCS32 code. */
9319 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9320 return FALSE;
9321
9322 /* Cannot mix float APCS and non-float APCS code. */
9323 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9324 return FALSE;
9325
9326 /* If the src and dest have different interworking flags
9327 then turn off the interworking bit. */
9328 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9329 {
9330 if (out_flags & EF_ARM_INTERWORK)
9331 _bfd_error_handler
9332 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9333 obfd, ibfd);
9334
9335 in_flags &= ~EF_ARM_INTERWORK;
9336 }
9337
9338 /* Likewise for PIC, though don't warn for this case. */
9339 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9340 in_flags &= ~EF_ARM_PIC;
9341 }
9342
9343 elf_elfheader (obfd)->e_flags = in_flags;
9344 elf_flags_init (obfd) = TRUE;
9345
9346 /* Also copy the EI_OSABI field. */
9347 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9348 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9349
9350 /* Copy object attributes. */
9351 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9352
9353 return TRUE;
9354 }
9355
9356 /* Values for Tag_ABI_PCS_R9_use. */
9357 enum
9358 {
9359 AEABI_R9_V6,
9360 AEABI_R9_SB,
9361 AEABI_R9_TLS,
9362 AEABI_R9_unused
9363 };
9364
9365 /* Values for Tag_ABI_PCS_RW_data. */
9366 enum
9367 {
9368 AEABI_PCS_RW_data_absolute,
9369 AEABI_PCS_RW_data_PCrel,
9370 AEABI_PCS_RW_data_SBrel,
9371 AEABI_PCS_RW_data_unused
9372 };
9373
9374 /* Values for Tag_ABI_enum_size. */
9375 enum
9376 {
9377 AEABI_enum_unused,
9378 AEABI_enum_short,
9379 AEABI_enum_wide,
9380 AEABI_enum_forced_wide
9381 };
9382
9383 /* Determine whether an object attribute tag takes an integer, a
9384 string or both. */
9385
9386 static int
9387 elf32_arm_obj_attrs_arg_type (int tag)
9388 {
9389 if (tag == Tag_compatibility)
9390 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9391 else if (tag == Tag_nodefaults)
9392 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9393 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9394 return ATTR_TYPE_FLAG_STR_VAL;
9395 else if (tag < 32)
9396 return ATTR_TYPE_FLAG_INT_VAL;
9397 else
9398 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9399 }
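/* Worked example (tag numbers assumed from the ABI addenda, not defined in
   this file): Tag_conformance (67, odd) takes ATTR_TYPE_FLAG_STR_VAL via the
   final return, while Tag_Virtualization_use (68, even) takes
   ATTR_TYPE_FLAG_INT_VAL; tags below 32 are resolved by the explicit cases
   above.  */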
9400
9401 /* The ABI defines that Tag_conformance should be emitted first, and that
9402 Tag_nodefaults should be second (if either is defined). This sets those
9403 two positions, and bumps up the position of all the remaining tags to
9404 compensate. */
9405 static int
9406 elf32_arm_obj_attrs_order (int num)
9407 {
9408 if (num == 4)
9409 return Tag_conformance;
9410 if (num == 5)
9411 return Tag_nodefaults;
9412 if ((num - 2) < Tag_nodefaults)
9413 return num - 2;
9414 if ((num - 1) < Tag_conformance)
9415 return num - 1;
9416 return num;
9417 }
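/* Worked example (assuming the usual tag values Tag_nodefaults == 64 and
   Tag_conformance == 67): positions 4 and 5 emit tags 67 and 64, positions
   6..65 emit tags 4..63, positions 66 and 67 emit tags 65 and 66, and
   positions 68 and above are left unchanged, so every tag is written
   exactly once.  */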
9418
9419 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9420 Returns -1 if no architecture could be read. */
9421
9422 static int
9423 get_secondary_compatible_arch (bfd *abfd)
9424 {
9425 obj_attribute *attr =
9426 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9427
9428 /* Note: the tag and its argument below are uleb128 values, though
9429 currently-defined values fit in one byte for each. */
9430 if (attr->s
9431 && attr->s[0] == Tag_CPU_arch
9432 && (attr->s[1] & 128) != 128
9433 && attr->s[2] == 0)
9434 return attr->s[1];
9435
9436 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9437 return -1;
9438 }
9439
9440 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9441 The tag is removed if ARCH is -1. */
9442
9443 static void
9444 set_secondary_compatible_arch (bfd *abfd, int arch)
9445 {
9446 obj_attribute *attr =
9447 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9448
9449 if (arch == -1)
9450 {
9451 attr->s = NULL;
9452 return;
9453 }
9454
9455 /* Note: the tag and its argument below are uleb128 values, though
9456 currently-defined values fit in one byte for each. */
9457 if (!attr->s)
9458 attr->s = bfd_alloc (abfd, 3);
9459 attr->s[0] = Tag_CPU_arch;
9460 attr->s[1] = arch;
9461 attr->s[2] = '\0';
9462 }
9463
9464 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9465 into account. */
9466
9467 static int
9468 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9469 int newtag, int secondary_compat)
9470 {
9471 #define T(X) TAG_CPU_ARCH_##X
9472 int tagl, tagh, result;
9473 const int v6t2[] =
9474 {
9475 T(V6T2), /* PRE_V4. */
9476 T(V6T2), /* V4. */
9477 T(V6T2), /* V4T. */
9478 T(V6T2), /* V5T. */
9479 T(V6T2), /* V5TE. */
9480 T(V6T2), /* V5TEJ. */
9481 T(V6T2), /* V6. */
9482 T(V7), /* V6KZ. */
9483 T(V6T2) /* V6T2. */
9484 };
9485 const int v6k[] =
9486 {
9487 T(V6K), /* PRE_V4. */
9488 T(V6K), /* V4. */
9489 T(V6K), /* V4T. */
9490 T(V6K), /* V5T. */
9491 T(V6K), /* V5TE. */
9492 T(V6K), /* V5TEJ. */
9493 T(V6K), /* V6. */
9494 T(V6KZ), /* V6KZ. */
9495 T(V7), /* V6T2. */
9496 T(V6K) /* V6K. */
9497 };
9498 const int v7[] =
9499 {
9500 T(V7), /* PRE_V4. */
9501 T(V7), /* V4. */
9502 T(V7), /* V4T. */
9503 T(V7), /* V5T. */
9504 T(V7), /* V5TE. */
9505 T(V7), /* V5TEJ. */
9506 T(V7), /* V6. */
9507 T(V7), /* V6KZ. */
9508 T(V7), /* V6T2. */
9509 T(V7), /* V6K. */
9510 T(V7) /* V7. */
9511 };
9512 const int v6_m[] =
9513 {
9514 -1, /* PRE_V4. */
9515 -1, /* V4. */
9516 T(V6K), /* V4T. */
9517 T(V6K), /* V5T. */
9518 T(V6K), /* V5TE. */
9519 T(V6K), /* V5TEJ. */
9520 T(V6K), /* V6. */
9521 T(V6KZ), /* V6KZ. */
9522 T(V7), /* V6T2. */
9523 T(V6K), /* V6K. */
9524 T(V7), /* V7. */
9525 T(V6_M) /* V6_M. */
9526 };
9527 const int v6s_m[] =
9528 {
9529 -1, /* PRE_V4. */
9530 -1, /* V4. */
9531 T(V6K), /* V4T. */
9532 T(V6K), /* V5T. */
9533 T(V6K), /* V5TE. */
9534 T(V6K), /* V5TEJ. */
9535 T(V6K), /* V6. */
9536 T(V6KZ), /* V6KZ. */
9537 T(V7), /* V6T2. */
9538 T(V6K), /* V6K. */
9539 T(V7), /* V7. */
9540 T(V6S_M), /* V6_M. */
9541 T(V6S_M) /* V6S_M. */
9542 };
9543 const int v4t_plus_v6_m[] =
9544 {
9545 -1, /* PRE_V4. */
9546 -1, /* V4. */
9547 T(V4T), /* V4T. */
9548 T(V5T), /* V5T. */
9549 T(V5TE), /* V5TE. */
9550 T(V5TEJ), /* V5TEJ. */
9551 T(V6), /* V6. */
9552 T(V6KZ), /* V6KZ. */
9553 T(V6T2), /* V6T2. */
9554 T(V6K), /* V6K. */
9555 T(V7), /* V7. */
9556 T(V6_M), /* V6_M. */
9557 T(V6S_M), /* V6S_M. */
9558 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9559 };
9560 const int *comb[] =
9561 {
9562 v6t2,
9563 v6k,
9564 v7,
9565 v6_m,
9566 v6s_m,
9567 /* Pseudo-architecture. */
9568 v4t_plus_v6_m
9569 };
9570
9571 /* Check we've not got a higher architecture than we know about. */
9572
9573 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9574 {
9575 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9576 return -1;
9577 }
9578
9579 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9580
9581 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9582 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9583 oldtag = T(V4T_PLUS_V6_M);
9584
9585 /* And override the new tag if we have a Tag_also_compatible_with on the
9586 input. */
9587
9588 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9589 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9590 newtag = T(V4T_PLUS_V6_M);
9591
9592 tagl = (oldtag < newtag) ? oldtag : newtag;
9593 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9594
9595 /* Architectures before V6KZ add features monotonically. */
9596 if (tagh <= TAG_CPU_ARCH_V6KZ)
9597 return result;
9598
9599 result = comb[tagh - T(V6T2)][tagl];
9600
9601 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9602 as the canonical version. */
9603 if (result == T(V4T_PLUS_V6_M))
9604 {
9605 result = T(V4T);
9606 *secondary_compat_out = T(V6_M);
9607 }
9608 else
9609 *secondary_compat_out = -1;
9610
9611 if (result == -1)
9612 {
9613 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9614 ibfd, oldtag, newtag);
9615 return -1;
9616 }
9617
9618 return result;
9619 #undef T
9620 }
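/* Worked example, derived from the tables above: combining a v6T2 object with
   a v6K object selects comb[V6K - V6T2], i.e. the v6k row, at index V6T2,
   which is T(V7); the merged Tag_CPU_arch therefore becomes v7, the first
   architecture that contains both feature sets.  */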
9621
9622 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9623 are conflicting attributes. */
9624
9625 static bfd_boolean
9626 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9627 {
9628 obj_attribute *in_attr;
9629 obj_attribute *out_attr;
9630 obj_attribute_list *in_list;
9631 obj_attribute_list *out_list;
9632 obj_attribute_list **out_listp;
9633 /* Some tags have 0 = don't care, 1 = strong requirement,
9634 2 = weak requirement. */
9635 static const int order_021[3] = {0, 2, 1};
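  /* For instance, merging an input value of 2 (weak) with an output value of
     1 (strong) keeps 1, because order_021[1] > order_021[2]; see the
     Tag_ABI_FP_denormal / Tag_ABI_PCS_GOT_use handling below.  */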
9636 /* For use with Tag_VFP_arch. */
9637 static const int order_01243[5] = {0, 1, 2, 4, 3};
9638 int i;
9639 bfd_boolean result = TRUE;
9640
9641 /* Skip the linker stubs file. This preserves previous behavior
9642 of accepting unknown attributes in the first input file - but
9643 is that a bug? */
9644 if (ibfd->flags & BFD_LINKER_CREATED)
9645 return TRUE;
9646
9647 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9648 {
9649 /* This is the first object. Copy the attributes. */
9650 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9651
9652 /* Use the Tag_null value to indicate the attributes have been
9653 initialized. */
9654 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9655
9656 return TRUE;
9657 }
9658
9659 in_attr = elf_known_obj_attributes_proc (ibfd);
9660 out_attr = elf_known_obj_attributes_proc (obfd);
9661 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9662 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9663 {
9664 /* Ignore mismatches if the object doesn't use floating point. */
9665 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9666 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9667 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9668 {
9669 _bfd_error_handler
9670 (_("error: %B uses VFP register arguments, %B does not"),
9671 ibfd, obfd);
9672 result = FALSE;
9673 }
9674 }
9675
9676 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9677 {
9678 /* Merge this attribute with existing attributes. */
9679 switch (i)
9680 {
9681 case Tag_CPU_raw_name:
9682 case Tag_CPU_name:
9683 /* These are merged after Tag_CPU_arch. */
9684 break;
9685
9686 case Tag_ABI_optimization_goals:
9687 case Tag_ABI_FP_optimization_goals:
9688 /* Use the first value seen. */
9689 break;
9690
9691 case Tag_CPU_arch:
9692 {
9693 int secondary_compat = -1, secondary_compat_out = -1;
9694 unsigned int saved_out_attr = out_attr[i].i;
9695 static const char *name_table[] = {
9696 /* These aren't real CPU names, but we can't guess
9697 that from the architecture version alone. */
9698 "Pre v4",
9699 "ARM v4",
9700 "ARM v4T",
9701 "ARM v5T",
9702 "ARM v5TE",
9703 "ARM v5TEJ",
9704 "ARM v6",
9705 "ARM v6KZ",
9706 "ARM v6T2",
9707 "ARM v6K",
9708 "ARM v7",
9709 "ARM v6-M",
9710 "ARM v6S-M"
9711 };
9712
9713 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9714 secondary_compat = get_secondary_compatible_arch (ibfd);
9715 secondary_compat_out = get_secondary_compatible_arch (obfd);
9716 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9717 &secondary_compat_out,
9718 in_attr[i].i,
9719 secondary_compat);
9720 set_secondary_compatible_arch (obfd, secondary_compat_out);
9721
9722 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9723 if (out_attr[i].i == saved_out_attr)
9724 ; /* Leave the names alone. */
9725 else if (out_attr[i].i == in_attr[i].i)
9726 {
9727 /* The output architecture has been changed to match the
9728 input architecture. Use the input names. */
9729 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9730 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9731 : NULL;
9732 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9733 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9734 : NULL;
9735 }
9736 else
9737 {
9738 out_attr[Tag_CPU_name].s = NULL;
9739 out_attr[Tag_CPU_raw_name].s = NULL;
9740 }
9741
9742 /* If we still don't have a value for Tag_CPU_name,
9743 make one up now. Tag_CPU_raw_name remains blank. */
9744 if (out_attr[Tag_CPU_name].s == NULL
9745 && out_attr[i].i < ARRAY_SIZE (name_table))
9746 out_attr[Tag_CPU_name].s =
9747 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9748 }
9749 break;
9750
9751 case Tag_ARM_ISA_use:
9752 case Tag_THUMB_ISA_use:
9753 case Tag_WMMX_arch:
9754 case Tag_Advanced_SIMD_arch:
9755 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9756 case Tag_ABI_FP_rounding:
9757 case Tag_ABI_FP_exceptions:
9758 case Tag_ABI_FP_user_exceptions:
9759 case Tag_ABI_FP_number_model:
9760 case Tag_VFP_HP_extension:
9761 case Tag_CPU_unaligned_access:
9762 case Tag_T2EE_use:
9763 case Tag_Virtualization_use:
9764 case Tag_MPextension_use:
9765 /* Use the largest value specified. */
9766 if (in_attr[i].i > out_attr[i].i)
9767 out_attr[i].i = in_attr[i].i;
9768 break;
9769
9770 case Tag_ABI_align8_preserved:
9771 case Tag_ABI_PCS_RO_data:
9772 /* Use the smallest value specified. */
9773 if (in_attr[i].i < out_attr[i].i)
9774 out_attr[i].i = in_attr[i].i;
9775 break;
9776
9777 case Tag_ABI_align8_needed:
9778 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9779 && (in_attr[Tag_ABI_align8_preserved].i == 0
9780 || out_attr[Tag_ABI_align8_preserved].i == 0))
9781 {
9782 /* This error message should be enabled once all non-conformant
9783 binaries in the toolchain have had the attributes set
9784 properly.
9785 _bfd_error_handler
9786 (_("error: %B: 8-byte data alignment conflicts with %B"),
9787 obfd, ibfd);
9788 result = FALSE; */
9789 }
9790 /* Fall through. */
9791 case Tag_ABI_FP_denormal:
9792 case Tag_ABI_PCS_GOT_use:
9793 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9794 value if greater than 2 (for future-proofing). */
9795 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9796 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9797 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9798 out_attr[i].i = in_attr[i].i;
9799 break;
9800
9801
9802 case Tag_CPU_arch_profile:
9803 if (out_attr[i].i != in_attr[i].i)
9804 {
9805 /* 0 will merge with anything.
9806 'A' and 'S' merge to 'A'.
9807 'R' and 'S' merge to 'R'.
9808 'M' and 'A|R|S' is an error. */
9809 if (out_attr[i].i == 0
9810 || (out_attr[i].i == 'S'
9811 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9812 out_attr[i].i = in_attr[i].i;
9813 else if (in_attr[i].i == 0
9814 || (in_attr[i].i == 'S'
9815 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9816 ; /* Do nothing. */
9817 else
9818 {
9819 _bfd_error_handler
9820 (_("error: %B: Conflicting architecture profiles %c/%c"),
9821 ibfd,
9822 in_attr[i].i ? in_attr[i].i : '0',
9823 out_attr[i].i ? out_attr[i].i : '0');
9824 result = FALSE;
9825 }
9826 }
9827 break;
9828 case Tag_VFP_arch:
9829 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9830 largest value if greater than 4 (for future-proofing). */
9831 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9832 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9833 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9834 out_attr[i].i = in_attr[i].i;
9835 break;
9836 case Tag_PCS_config:
9837 if (out_attr[i].i == 0)
9838 out_attr[i].i = in_attr[i].i;
9839 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9840 {
9841 /* It's sometimes ok to mix different configs, so this is only
9842 a warning. */
9843 _bfd_error_handler
9844 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9845 }
9846 break;
9847 case Tag_ABI_PCS_R9_use:
9848 if (in_attr[i].i != out_attr[i].i
9849 && out_attr[i].i != AEABI_R9_unused
9850 && in_attr[i].i != AEABI_R9_unused)
9851 {
9852 _bfd_error_handler
9853 (_("error: %B: Conflicting use of R9"), ibfd);
9854 result = FALSE;
9855 }
9856 if (out_attr[i].i == AEABI_R9_unused)
9857 out_attr[i].i = in_attr[i].i;
9858 break;
9859 case Tag_ABI_PCS_RW_data:
9860 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9861 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9862 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9863 {
9864 _bfd_error_handler
9865 (_("error: %B: SB relative addressing conflicts with use of R9"),
9866 ibfd);
9867 result = FALSE;
9868 }
9869 /* Use the smallest value specified. */
9870 if (in_attr[i].i < out_attr[i].i)
9871 out_attr[i].i = in_attr[i].i;
9872 break;
9873 case Tag_ABI_PCS_wchar_t:
9874 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9875 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9876 {
9877 _bfd_error_handler
9878 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9879 ibfd, in_attr[i].i, out_attr[i].i);
9880 }
9881 else if (in_attr[i].i && !out_attr[i].i)
9882 out_attr[i].i = in_attr[i].i;
9883 break;
9884 case Tag_ABI_enum_size:
9885 if (in_attr[i].i != AEABI_enum_unused)
9886 {
9887 if (out_attr[i].i == AEABI_enum_unused
9888 || out_attr[i].i == AEABI_enum_forced_wide)
9889 {
9890 /* The existing object is compatible with anything.
9891 Use whatever requirements the new object has. */
9892 out_attr[i].i = in_attr[i].i;
9893 }
9894 else if (in_attr[i].i != AEABI_enum_forced_wide
9895 && out_attr[i].i != in_attr[i].i
9896 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9897 {
9898 static const char *aeabi_enum_names[] =
9899 { "", "variable-size", "32-bit", "" };
9900 const char *in_name =
9901 in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
9902 ? aeabi_enum_names[in_attr[i].i]
9903 : "<unknown>";
9904 const char *out_name =
9905 out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
9906 ? aeabi_enum_names[out_attr[i].i]
9907 : "<unknown>";
9908 _bfd_error_handler
9909 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9910 ibfd, in_name, out_name);
9911 }
9912 }
9913 break;
9914 case Tag_ABI_VFP_args:
9915 /* Already done. */
9916 break;
9917 case Tag_ABI_WMMX_args:
9918 if (in_attr[i].i != out_attr[i].i)
9919 {
9920 _bfd_error_handler
9921 (_("error: %B uses iWMMXt register arguments, %B does not"),
9922 ibfd, obfd);
9923 result = FALSE;
9924 }
9925 break;
9926 case Tag_compatibility:
9927 /* Merged in target-independent code. */
9928 break;
9929 case Tag_ABI_HardFP_use:
9930 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9931 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9932 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9933 out_attr[i].i = 3;
9934 else if (in_attr[i].i > out_attr[i].i)
9935 out_attr[i].i = in_attr[i].i;
9936 break;
9937 case Tag_ABI_FP_16bit_format:
9938 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9939 {
9940 if (in_attr[i].i != out_attr[i].i)
9941 {
9942 _bfd_error_handler
9943 (_("error: fp16 format mismatch between %B and %B"),
9944 ibfd, obfd);
9945 result = FALSE;
9946 }
9947 }
9948 if (in_attr[i].i != 0)
9949 out_attr[i].i = in_attr[i].i;
9950 break;
9951
9952 case Tag_nodefaults:
9953 /* This tag is set if it exists, but the value is unused (and is
9954 typically zero). We don't actually need to do anything here -
9955 the merge happens automatically when the type flags are merged
9956 below. */
9957 break;
9958 case Tag_also_compatible_with:
9959 /* Already done in Tag_CPU_arch. */
9960 break;
9961 case Tag_conformance:
9962 /* Keep the attribute if it matches. Throw it away otherwise.
9963 No attribute means no claim to conform. */
9964 if (!in_attr[i].s || !out_attr[i].s
9965 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9966 out_attr[i].s = NULL;
9967 break;
9968
9969 default:
9970 {
9971 bfd *err_bfd = NULL;
9972
9973 /* The "known_obj_attributes" table does contain some undefined
9974 attributes. Ensure that they are unused. */
9975 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9976 err_bfd = obfd;
9977 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9978 err_bfd = ibfd;
9979
9980 if (err_bfd != NULL)
9981 {
9982 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9983 if ((i & 127) < 64)
9984 {
9985 _bfd_error_handler
9986 (_("%B: Unknown mandatory EABI object attribute %d"),
9987 err_bfd, i);
9988 bfd_set_error (bfd_error_bad_value);
9989 result = FALSE;
9990 }
9991 else
9992 {
9993 _bfd_error_handler
9994 (_("Warning: %B: Unknown EABI object attribute %d"),
9995 err_bfd, i);
9996 }
9997 }
9998
9999 /* Only pass on attributes that match in both inputs. */
10000 if (in_attr[i].i != out_attr[i].i
10001 || in_attr[i].s != out_attr[i].s
10002 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10003 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10004 {
10005 out_attr[i].i = 0;
10006 out_attr[i].s = NULL;
10007 }
10008 }
10009 }
10010
10011 /* If out_attr was copied from in_attr then it won't have a type yet. */
10012 if (in_attr[i].type && !out_attr[i].type)
10013 out_attr[i].type = in_attr[i].type;
10014 }
10015
10016 /* Merge Tag_compatibility attributes and any common GNU ones. */
10017 _bfd_elf_merge_object_attributes (ibfd, obfd);
10018
10019 /* Check for any attributes not known on ARM. */
10020 in_list = elf_other_obj_attributes_proc (ibfd);
10021 out_listp = &elf_other_obj_attributes_proc (obfd);
10022 out_list = *out_listp;
10023
10024 for (; in_list || out_list; )
10025 {
10026 bfd *err_bfd = NULL;
10027 int err_tag = 0;
10028
10029 /* The tags for each list are in numerical order. */
10030 /* If the tags are equal, then merge. */
10031 if (out_list && (!in_list || in_list->tag > out_list->tag))
10032 {
10033 /* This attribute only exists in obfd. We can't merge, and we don't
10034 know what the tag means, so delete it. */
10035 err_bfd = obfd;
10036 err_tag = out_list->tag;
10037 *out_listp = out_list->next;
10038 out_list = *out_listp;
10039 }
10040 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10041 {
10042 /* This attribute only exists in ibfd. We can't merge, and we don't
10043 know what the tag means, so ignore it. */
10044 err_bfd = ibfd;
10045 err_tag = in_list->tag;
10046 in_list = in_list->next;
10047 }
10048 else /* The tags are equal. */
10049 {
10050 /* At present, all attributes in the list are unknown, and
10051 therefore can't be merged meaningfully. */
10052 err_bfd = obfd;
10053 err_tag = out_list->tag;
10054
10055 /* Only pass on attributes that match in both inputs. */
10056 if (in_list->attr.i != out_list->attr.i
10057 || in_list->attr.s != out_list->attr.s
10058 || (in_list->attr.s && out_list->attr.s
10059 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10060 {
10061 /* No match. Delete the attribute. */
10062 *out_listp = out_list->next;
10063 out_list = *out_listp;
10064 }
10065 else
10066 {
10067 /* Matched. Keep the attribute and move to the next. */
10068 out_list = out_list->next;
10069 in_list = in_list->next;
10070 }
10071 }
10072
10073 if (err_bfd)
10074 {
10075 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10076 if ((err_tag & 127) < 64)
10077 {
10078 _bfd_error_handler
10079 (_("%B: Unknown mandatory EABI object attribute %d"),
10080 err_bfd, err_tag);
10081 bfd_set_error (bfd_error_bad_value);
10082 result = FALSE;
10083 }
10084 else
10085 {
10086 _bfd_error_handler
10087 (_("Warning: %B: Unknown EABI object attribute %d"),
10088 err_bfd, err_tag);
10089 }
10090 }
10091 }
10092 return result;
10093 }
10094
10095
10096 /* Return TRUE if the two EABI versions are incompatible. */
10097
10098 static bfd_boolean
10099 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10100 {
10101 /* v4 and v5 are the same spec before and after it was released,
10102 so allow mixing them. */
10103 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10104 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10105 return TRUE;
10106
10107 return (iver == over);
10108 }
10109
10110 /* Merge backend specific data from an object file to the output
10111 object file when linking. */
10112
10113 static bfd_boolean
10114 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10115 {
10116 flagword out_flags;
10117 flagword in_flags;
10118 bfd_boolean flags_compatible = TRUE;
10119 asection *sec;
10120
10121 /* Check if we have the same endianness. */
10122 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10123 return FALSE;
10124
10125 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10126 return TRUE;
10127
10128 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10129 return FALSE;
10130
10131 /* The input BFD must have had its flags initialised. */
10132 /* The following seems bogus to me -- The flags are initialized in
10133 the assembler but I don't think an elf_flags_init field is
10134 written into the object. */
10135 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10136
10137 in_flags = elf_elfheader (ibfd)->e_flags;
10138 out_flags = elf_elfheader (obfd)->e_flags;
10139
10140 /* In theory there is no reason why we couldn't handle this. However
10141 in practice it isn't even close to working and there is no real
10142 reason to want it. */
10143 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10144 && !(ibfd->flags & DYNAMIC)
10145 && (in_flags & EF_ARM_BE8))
10146 {
10147 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10148 ibfd);
10149 return FALSE;
10150 }
10151
10152 if (!elf_flags_init (obfd))
10153 {
10154 /* If the input is the default architecture and had the default
10155 flags then do not bother setting the flags for the output
10156 architecture, instead allow future merges to do this. If no
10157 future merges ever set these flags then they will retain their
10158 uninitialised values which, surprise surprise, correspond
10159 to the default values. */
10160 if (bfd_get_arch_info (ibfd)->the_default
10161 && elf_elfheader (ibfd)->e_flags == 0)
10162 return TRUE;
10163
10164 elf_flags_init (obfd) = TRUE;
10165 elf_elfheader (obfd)->e_flags = in_flags;
10166
10167 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10168 && bfd_get_arch_info (obfd)->the_default)
10169 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10170
10171 return TRUE;
10172 }
10173
10174 /* Determine what should happen if the input ARM architecture
10175 does not match the output ARM architecture. */
10176 if (! bfd_arm_merge_machines (ibfd, obfd))
10177 return FALSE;
10178
10179 /* Identical flags must be compatible. */
10180 if (in_flags == out_flags)
10181 return TRUE;
10182
10183 /* Check to see if the input BFD actually contains any sections. If
10184 not, its flags may not have been initialised either, but it
10185 cannot actually cause any incompatibility. Do not short-circuit
10186 dynamic objects; their section list may be emptied by
10187 elf_link_add_object_symbols.
10188
10189 Also check to see if there are no code sections in the input.
10190 In this case there is no need to check for code specific flags.
10191 XXX - do we need to worry about floating-point format compatibility
10192 in data sections? */
10193 if (!(ibfd->flags & DYNAMIC))
10194 {
10195 bfd_boolean null_input_bfd = TRUE;
10196 bfd_boolean only_data_sections = TRUE;
10197
10198 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10199 {
10200 /* Ignore synthetic glue sections. */
10201 if (strcmp (sec->name, ".glue_7")
10202 && strcmp (sec->name, ".glue_7t"))
10203 {
10204 if ((bfd_get_section_flags (ibfd, sec)
10205 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10206 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10207 only_data_sections = FALSE;
10208
10209 null_input_bfd = FALSE;
10210 break;
10211 }
10212 }
10213
10214 if (null_input_bfd || only_data_sections)
10215 return TRUE;
10216 }
10217
10218 /* Complain about various flag mismatches. */
10219 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10220 EF_ARM_EABI_VERSION (out_flags)))
10221 {
10222 _bfd_error_handler
10223 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10224 ibfd, obfd,
10225 (in_flags & EF_ARM_EABIMASK) >> 24,
10226 (out_flags & EF_ARM_EABIMASK) >> 24);
10227 return FALSE;
10228 }
10229
10230 /* Not sure what needs to be checked for EABI versions >= 1. */
10231 /* VxWorks libraries do not use these flags. */
10232 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10233 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10234 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10235 {
10236 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10237 {
10238 _bfd_error_handler
10239 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10240 ibfd, obfd,
10241 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10242 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10243 flags_compatible = FALSE;
10244 }
10245
10246 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10247 {
10248 if (in_flags & EF_ARM_APCS_FLOAT)
10249 _bfd_error_handler
10250 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10251 ibfd, obfd);
10252 else
10253 _bfd_error_handler
10254 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10255 ibfd, obfd);
10256
10257 flags_compatible = FALSE;
10258 }
10259
10260 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10261 {
10262 if (in_flags & EF_ARM_VFP_FLOAT)
10263 _bfd_error_handler
10264 (_("error: %B uses VFP instructions, whereas %B does not"),
10265 ibfd, obfd);
10266 else
10267 _bfd_error_handler
10268 (_("error: %B uses FPA instructions, whereas %B does not"),
10269 ibfd, obfd);
10270
10271 flags_compatible = FALSE;
10272 }
10273
10274 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10275 {
10276 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10277 _bfd_error_handler
10278 (_("error: %B uses Maverick instructions, whereas %B does not"),
10279 ibfd, obfd);
10280 else
10281 _bfd_error_handler
10282 (_("error: %B does not use Maverick instructions, whereas %B does"),
10283 ibfd, obfd);
10284
10285 flags_compatible = FALSE;
10286 }
10287
10288 #ifdef EF_ARM_SOFT_FLOAT
10289 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10290 {
10291 /* We can allow interworking between code that is VFP format
10292 layout, and uses either soft float or integer regs for
10293 passing floating point arguments and results. We already
10294 know that the APCS_FLOAT flags match; similarly for VFP
10295 flags. */
10296 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10297 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10298 {
10299 if (in_flags & EF_ARM_SOFT_FLOAT)
10300 _bfd_error_handler
10301 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10302 ibfd, obfd);
10303 else
10304 _bfd_error_handler
10305 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10306 ibfd, obfd);
10307
10308 flags_compatible = FALSE;
10309 }
10310 }
10311 #endif
10312
10313 /* Interworking mismatch is only a warning. */
10314 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10315 {
10316 if (in_flags & EF_ARM_INTERWORK)
10317 {
10318 _bfd_error_handler
10319 (_("Warning: %B supports interworking, whereas %B does not"),
10320 ibfd, obfd);
10321 }
10322 else
10323 {
10324 _bfd_error_handler
10325 (_("Warning: %B does not support interworking, whereas %B does"),
10326 ibfd, obfd);
10327 }
10328 }
10329 }
10330
10331 return flags_compatible;
10332 }
10333
10334 /* Display the flags field. */
10335
10336 static bfd_boolean
10337 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10338 {
10339 FILE * file = (FILE *) ptr;
10340 unsigned long flags;
10341
10342 BFD_ASSERT (abfd != NULL && ptr != NULL);
10343
10344 /* Print normal ELF private data. */
10345 _bfd_elf_print_private_bfd_data (abfd, ptr);
10346
10347 flags = elf_elfheader (abfd)->e_flags;
10348 /* Ignore init flag - it may not be set, despite the flags field
10349 containing valid data. */
10350
10351 /* xgettext:c-format */
10352 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10353
10354 switch (EF_ARM_EABI_VERSION (flags))
10355 {
10356 case EF_ARM_EABI_UNKNOWN:
10357 /* The following flag bits are GNU extensions and not part of the
10358 official ARM ELF extended ABI. Hence they are only decoded if
10359 the EABI version is not set. */
10360 if (flags & EF_ARM_INTERWORK)
10361 fprintf (file, _(" [interworking enabled]"));
10362
10363 if (flags & EF_ARM_APCS_26)
10364 fprintf (file, " [APCS-26]");
10365 else
10366 fprintf (file, " [APCS-32]");
10367
10368 if (flags & EF_ARM_VFP_FLOAT)
10369 fprintf (file, _(" [VFP float format]"));
10370 else if (flags & EF_ARM_MAVERICK_FLOAT)
10371 fprintf (file, _(" [Maverick float format]"));
10372 else
10373 fprintf (file, _(" [FPA float format]"));
10374
10375 if (flags & EF_ARM_APCS_FLOAT)
10376 fprintf (file, _(" [floats passed in float registers]"));
10377
10378 if (flags & EF_ARM_PIC)
10379 fprintf (file, _(" [position independent]"));
10380
10381 if (flags & EF_ARM_NEW_ABI)
10382 fprintf (file, _(" [new ABI]"));
10383
10384 if (flags & EF_ARM_OLD_ABI)
10385 fprintf (file, _(" [old ABI]"));
10386
10387 if (flags & EF_ARM_SOFT_FLOAT)
10388 fprintf (file, _(" [software FP]"));
10389
10390 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10391 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10392 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10393 | EF_ARM_MAVERICK_FLOAT);
10394 break;
10395
10396 case EF_ARM_EABI_VER1:
10397 fprintf (file, _(" [Version1 EABI]"));
10398
10399 if (flags & EF_ARM_SYMSARESORTED)
10400 fprintf (file, _(" [sorted symbol table]"));
10401 else
10402 fprintf (file, _(" [unsorted symbol table]"));
10403
10404 flags &= ~ EF_ARM_SYMSARESORTED;
10405 break;
10406
10407 case EF_ARM_EABI_VER2:
10408 fprintf (file, _(" [Version2 EABI]"));
10409
10410 if (flags & EF_ARM_SYMSARESORTED)
10411 fprintf (file, _(" [sorted symbol table]"));
10412 else
10413 fprintf (file, _(" [unsorted symbol table]"));
10414
10415 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10416 fprintf (file, _(" [dynamic symbols use segment index]"));
10417
10418 if (flags & EF_ARM_MAPSYMSFIRST)
10419 fprintf (file, _(" [mapping symbols precede others]"));
10420
10421 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10422 | EF_ARM_MAPSYMSFIRST);
10423 break;
10424
10425 case EF_ARM_EABI_VER3:
10426 fprintf (file, _(" [Version3 EABI]"));
10427 break;
10428
10429 case EF_ARM_EABI_VER4:
10430 fprintf (file, _(" [Version4 EABI]"));
10431 goto eabi;
10432
10433 case EF_ARM_EABI_VER5:
10434 fprintf (file, _(" [Version5 EABI]"));
10435 eabi:
10436 if (flags & EF_ARM_BE8)
10437 fprintf (file, _(" [BE8]"));
10438
10439 if (flags & EF_ARM_LE8)
10440 fprintf (file, _(" [LE8]"));
10441
10442 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10443 break;
10444
10445 default:
10446 fprintf (file, _(" <EABI version unrecognised>"));
10447 break;
10448 }
10449
10450 flags &= ~ EF_ARM_EABIMASK;
10451
10452 if (flags & EF_ARM_RELEXEC)
10453 fprintf (file, _(" [relocatable executable]"));
10454
10455 if (flags & EF_ARM_HASENTRY)
10456 fprintf (file, _(" [has entry point]"));
10457
10458 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10459
10460 if (flags)
10461 fprintf (file, _("<Unrecognised flag bits set>"));
10462
10463 fputc ('\n', file);
10464
10465 return TRUE;
10466 }
10467
10468 static int
10469 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10470 {
10471 switch (ELF_ST_TYPE (elf_sym->st_info))
10472 {
10473 case STT_ARM_TFUNC:
10474 return ELF_ST_TYPE (elf_sym->st_info);
10475
10476 case STT_ARM_16BIT:
10477 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10478 This allows us to distinguish between data used by Thumb instructions
10479 and non-data (which is probably code) inside Thumb regions of an
10480 executable. */
10481 if (type != STT_OBJECT && type != STT_TLS)
10482 return ELF_ST_TYPE (elf_sym->st_info);
10483 break;
10484
10485 default:
10486 break;
10487 }
10488
10489 return type;
10490 }
10491
10492 static asection *
10493 elf32_arm_gc_mark_hook (asection *sec,
10494 struct bfd_link_info *info,
10495 Elf_Internal_Rela *rel,
10496 struct elf_link_hash_entry *h,
10497 Elf_Internal_Sym *sym)
10498 {
10499 if (h != NULL)
10500 switch (ELF32_R_TYPE (rel->r_info))
10501 {
10502 case R_ARM_GNU_VTINHERIT:
10503 case R_ARM_GNU_VTENTRY:
10504 return NULL;
10505 }
10506
10507 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10508 }
10509
10510 /* Update the got entry reference counts for the section being removed. */
10511
10512 static bfd_boolean
10513 elf32_arm_gc_sweep_hook (bfd * abfd,
10514 struct bfd_link_info * info,
10515 asection * sec,
10516 const Elf_Internal_Rela * relocs)
10517 {
10518 Elf_Internal_Shdr *symtab_hdr;
10519 struct elf_link_hash_entry **sym_hashes;
10520 bfd_signed_vma *local_got_refcounts;
10521 const Elf_Internal_Rela *rel, *relend;
10522 struct elf32_arm_link_hash_table * globals;
10523
10524 if (info->relocatable)
10525 return TRUE;
10526
10527 globals = elf32_arm_hash_table (info);
10528
10529 elf_section_data (sec)->local_dynrel = NULL;
10530
10531 symtab_hdr = & elf_symtab_hdr (abfd);
10532 sym_hashes = elf_sym_hashes (abfd);
10533 local_got_refcounts = elf_local_got_refcounts (abfd);
10534
10535 check_use_blx (globals);
10536
10537 relend = relocs + sec->reloc_count;
10538 for (rel = relocs; rel < relend; rel++)
10539 {
10540 unsigned long r_symndx;
10541 struct elf_link_hash_entry *h = NULL;
10542 int r_type;
10543
10544 r_symndx = ELF32_R_SYM (rel->r_info);
10545 if (r_symndx >= symtab_hdr->sh_info)
10546 {
10547 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10548 while (h->root.type == bfd_link_hash_indirect
10549 || h->root.type == bfd_link_hash_warning)
10550 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10551 }
10552
10553 r_type = ELF32_R_TYPE (rel->r_info);
10554 r_type = arm_real_reloc_type (globals, r_type);
10555 switch (r_type)
10556 {
10557 case R_ARM_GOT32:
10558 case R_ARM_GOT_PREL:
10559 case R_ARM_TLS_GD32:
10560 case R_ARM_TLS_IE32:
10561 if (h != NULL)
10562 {
10563 if (h->got.refcount > 0)
10564 h->got.refcount -= 1;
10565 }
10566 else if (local_got_refcounts != NULL)
10567 {
10568 if (local_got_refcounts[r_symndx] > 0)
10569 local_got_refcounts[r_symndx] -= 1;
10570 }
10571 break;
10572
10573 case R_ARM_TLS_LDM32:
10574 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10575 break;
10576
10577 case R_ARM_ABS32:
10578 case R_ARM_ABS32_NOI:
10579 case R_ARM_REL32:
10580 case R_ARM_REL32_NOI:
10581 case R_ARM_PC24:
10582 case R_ARM_PLT32:
10583 case R_ARM_CALL:
10584 case R_ARM_JUMP24:
10585 case R_ARM_PREL31:
10586 case R_ARM_THM_CALL:
10587 case R_ARM_THM_JUMP24:
10588 case R_ARM_THM_JUMP19:
10589 case R_ARM_MOVW_ABS_NC:
10590 case R_ARM_MOVT_ABS:
10591 case R_ARM_MOVW_PREL_NC:
10592 case R_ARM_MOVT_PREL:
10593 case R_ARM_THM_MOVW_ABS_NC:
10594 case R_ARM_THM_MOVT_ABS:
10595 case R_ARM_THM_MOVW_PREL_NC:
10596 case R_ARM_THM_MOVT_PREL:
10597 /* Should the interworking branches be here also? */
10598
10599 if (h != NULL)
10600 {
10601 struct elf32_arm_link_hash_entry *eh;
10602 struct elf32_arm_relocs_copied **pp;
10603 struct elf32_arm_relocs_copied *p;
10604
10605 eh = (struct elf32_arm_link_hash_entry *) h;
10606
10607 if (h->plt.refcount > 0)
10608 {
10609 h->plt.refcount -= 1;
10610 if (r_type == R_ARM_THM_CALL)
10611 eh->plt_maybe_thumb_refcount--;
10612
10613 if (r_type == R_ARM_THM_JUMP24
10614 || r_type == R_ARM_THM_JUMP19)
10615 eh->plt_thumb_refcount--;
10616 }
10617
10618 if (r_type == R_ARM_ABS32
10619 || r_type == R_ARM_REL32
10620 || r_type == R_ARM_ABS32_NOI
10621 || r_type == R_ARM_REL32_NOI)
10622 {
10623 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10624 pp = &p->next)
10625 if (p->section == sec)
10626 {
10627 p->count -= 1;
10628 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10629 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10630 p->pc_count -= 1;
10631 if (p->count == 0)
10632 *pp = p->next;
10633 break;
10634 }
10635 }
10636 }
10637 break;
10638
10639 default:
10640 break;
10641 }
10642 }
10643
10644 return TRUE;
10645 }
10646
10647 /* Look through the relocs for a section during the first phase. */
10648
10649 static bfd_boolean
10650 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10651 asection *sec, const Elf_Internal_Rela *relocs)
10652 {
10653 Elf_Internal_Shdr *symtab_hdr;
10654 struct elf_link_hash_entry **sym_hashes;
10655 const Elf_Internal_Rela *rel;
10656 const Elf_Internal_Rela *rel_end;
10657 bfd *dynobj;
10658 asection *sreloc;
10659 bfd_vma *local_got_offsets;
10660 struct elf32_arm_link_hash_table *htab;
10661 bfd_boolean needs_plt;
10662 unsigned long nsyms;
10663
10664 if (info->relocatable)
10665 return TRUE;
10666
10667 BFD_ASSERT (is_arm_elf (abfd));
10668
10669 htab = elf32_arm_hash_table (info);
10670 sreloc = NULL;
10671
10672 /* Create dynamic sections for relocatable executables so that we can
10673 copy relocations. */
10674 if (htab->root.is_relocatable_executable
10675 && ! htab->root.dynamic_sections_created)
10676 {
10677 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10678 return FALSE;
10679 }
10680
10681 dynobj = elf_hash_table (info)->dynobj;
10682 local_got_offsets = elf_local_got_offsets (abfd);
10683
10684 symtab_hdr = & elf_symtab_hdr (abfd);
10685 sym_hashes = elf_sym_hashes (abfd);
10686 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10687
10688 rel_end = relocs + sec->reloc_count;
10689 for (rel = relocs; rel < rel_end; rel++)
10690 {
10691 struct elf_link_hash_entry *h;
10692 struct elf32_arm_link_hash_entry *eh;
10693 unsigned long r_symndx;
10694 int r_type;
10695
10696 r_symndx = ELF32_R_SYM (rel->r_info);
10697 r_type = ELF32_R_TYPE (rel->r_info);
10698 r_type = arm_real_reloc_type (htab, r_type);
10699
10700 if (r_symndx >= nsyms
10701 /* PR 9934: It is possible to have relocations that do not
10702 refer to symbols, thus it is also possible to have an
10703 object file containing relocations but no symbol table. */
10704 && (r_symndx > 0 || nsyms > 0))
10705 {
10706 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10707 r_symndx);
10708 return FALSE;
10709 }
10710
10711 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10712 h = NULL;
10713 else
10714 {
10715 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10716 while (h->root.type == bfd_link_hash_indirect
10717 || h->root.type == bfd_link_hash_warning)
10718 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10719 }
10720
10721 eh = (struct elf32_arm_link_hash_entry *) h;
10722
10723 switch (r_type)
10724 {
10725 case R_ARM_GOT32:
10726 case R_ARM_GOT_PREL:
10727 case R_ARM_TLS_GD32:
10728 case R_ARM_TLS_IE32:
10729 /* This symbol requires a global offset table entry. */
10730 {
10731 int tls_type, old_tls_type;
10732
10733 switch (r_type)
10734 {
10735 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10736 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10737 default: tls_type = GOT_NORMAL; break;
10738 }
10739
10740 if (h != NULL)
10741 {
10742 h->got.refcount++;
10743 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10744 }
10745 else
10746 {
10747 bfd_signed_vma *local_got_refcounts;
10748
10749 /* This is a global offset table entry for a local symbol. */
10750 local_got_refcounts = elf_local_got_refcounts (abfd);
10751 if (local_got_refcounts == NULL)
10752 {
10753 bfd_size_type size;
10754
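		    /* A single allocation is used for both per-symbol arrays:
		       sh_info refcounts followed by sh_info tls_type bytes,
		       which is what the pointer arithmetic below relies on.  */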
10755 size = symtab_hdr->sh_info;
10756 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10757 local_got_refcounts = bfd_zalloc (abfd, size);
10758 if (local_got_refcounts == NULL)
10759 return FALSE;
10760 elf_local_got_refcounts (abfd) = local_got_refcounts;
10761 elf32_arm_local_got_tls_type (abfd)
10762 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10763 }
10764 local_got_refcounts[r_symndx] += 1;
10765 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10766 }
10767
10768 /* We will already have issued an error message if there is a
10769 TLS / non-TLS mismatch, based on the symbol type. We don't
10770 support any linker relaxations. So just combine any TLS
10771 types needed. */
10772 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10773 && tls_type != GOT_NORMAL)
10774 tls_type |= old_tls_type;
10775
10776 if (old_tls_type != tls_type)
10777 {
10778 if (h != NULL)
10779 elf32_arm_hash_entry (h)->tls_type = tls_type;
10780 else
10781 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10782 }
10783 }
10784 /* Fall through. */
10785
10786 case R_ARM_TLS_LDM32:
10787 if (r_type == R_ARM_TLS_LDM32)
10788 htab->tls_ldm_got.refcount++;
10789 /* Fall through. */
10790
10791 case R_ARM_GOTOFF32:
10792 case R_ARM_GOTPC:
10793 if (htab->sgot == NULL)
10794 {
10795 if (htab->root.dynobj == NULL)
10796 htab->root.dynobj = abfd;
10797 if (!create_got_section (htab->root.dynobj, info))
10798 return FALSE;
10799 }
10800 break;
10801
10802 case R_ARM_ABS12:
10803 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10804 ldr __GOTT_INDEX__ offsets. */
10805 if (!htab->vxworks_p)
10806 break;
10807 /* Fall through. */
10808
10809 case R_ARM_PC24:
10810 case R_ARM_PLT32:
10811 case R_ARM_CALL:
10812 case R_ARM_JUMP24:
10813 case R_ARM_PREL31:
10814 case R_ARM_THM_CALL:
10815 case R_ARM_THM_JUMP24:
10816 case R_ARM_THM_JUMP19:
10817 needs_plt = 1;
10818 goto normal_reloc;
10819
10820 case R_ARM_MOVW_ABS_NC:
10821 case R_ARM_MOVT_ABS:
10822 case R_ARM_THM_MOVW_ABS_NC:
10823 case R_ARM_THM_MOVT_ABS:
10824 if (info->shared)
10825 {
10826 (*_bfd_error_handler)
10827 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10828 abfd, elf32_arm_howto_table_1[r_type].name,
10829 (h) ? h->root.root.string : "a local symbol");
10830 bfd_set_error (bfd_error_bad_value);
10831 return FALSE;
10832 }
10833
10834 /* Fall through. */
10835 case R_ARM_ABS32:
10836 case R_ARM_ABS32_NOI:
10837 case R_ARM_REL32:
10838 case R_ARM_REL32_NOI:
10839 case R_ARM_MOVW_PREL_NC:
10840 case R_ARM_MOVT_PREL:
10841 case R_ARM_THM_MOVW_PREL_NC:
10842 case R_ARM_THM_MOVT_PREL:
10843 needs_plt = 0;
10844 normal_reloc:
10845
10846 /* Should the interworking branches be listed here? */
10847 if (h != NULL)
10848 {
10849 /* If this reloc is in a read-only section, we might
10850 need a copy reloc. We can't check reliably at this
10851 stage whether the section is read-only, as input
10852 sections have not yet been mapped to output sections.
10853 Tentatively set the flag for now, and correct in
10854 adjust_dynamic_symbol. */
10855 if (!info->shared)
10856 h->non_got_ref = 1;
10857
10858 /* We may need a .plt entry if the function this reloc
10859 refers to is in a different object. We can't tell for
10860 sure yet, because something later might force the
10861 symbol local. */
10862 if (needs_plt)
10863 h->needs_plt = 1;
10864
10865 /* If we create a PLT entry, this relocation will reference
10866 it, even if it's an ABS32 relocation. */
10867 h->plt.refcount += 1;
10868
10869 /* It's too early to use htab->use_blx here, so we have to
10870 record possible blx references separately from
10871 relocs that definitely need a thumb stub. */
10872
10873 if (r_type == R_ARM_THM_CALL)
10874 eh->plt_maybe_thumb_refcount += 1;
10875
10876 if (r_type == R_ARM_THM_JUMP24
10877 || r_type == R_ARM_THM_JUMP19)
10878 eh->plt_thumb_refcount += 1;
10879 }
10880
10881 /* If we are creating a shared library or relocatable executable,
10882 and this is a reloc against a global symbol, or a non PC
10883 relative reloc against a local symbol, then we need to copy
10884 the reloc into the shared library. However, if we are linking
10885 with -Bsymbolic, we do not need to copy a reloc against a
10886 global symbol which is defined in an object we are
10887 including in the link (i.e., DEF_REGULAR is set). At
10888 this point we have not seen all the input files, so it is
10889 possible that DEF_REGULAR is not set now but will be set
10890 later (it is never cleared). We account for that
10891 possibility below by storing information in the
10892 relocs_copied field of the hash table entry. */
10893 if ((info->shared || htab->root.is_relocatable_executable)
10894 && (sec->flags & SEC_ALLOC) != 0
10895 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10896 || (h != NULL && ! h->needs_plt
10897 && (! info->symbolic || ! h->def_regular))))
10898 {
10899 struct elf32_arm_relocs_copied *p, **head;
10900
10901 /* When creating a shared object, we must copy these
10902 reloc types into the output file. We create a reloc
10903 section in dynobj and make room for this reloc. */
10904 if (sreloc == NULL)
10905 {
10906 sreloc = _bfd_elf_make_dynamic_reloc_section
10907 (sec, dynobj, 2, abfd, ! htab->use_rel);
10908
10909 if (sreloc == NULL)
10910 return FALSE;
10911
10912 /* BPABI objects never have dynamic relocations mapped. */
10913 if (htab->symbian_p)
10914 {
10915 flagword flags;
10916
10917 flags = bfd_get_section_flags (dynobj, sreloc);
10918 flags &= ~(SEC_LOAD | SEC_ALLOC);
10919 bfd_set_section_flags (dynobj, sreloc, flags);
10920 }
10921 }
10922
10923 /* If this is a global symbol, we count the number of
10924 relocations we need for this symbol. */
10925 if (h != NULL)
10926 {
10927 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10928 }
10929 else
10930 {
10931 /* Track dynamic relocs needed for local syms too.
10932 We really need local syms available to do this
10933 easily. Oh well. */
10934 asection *s;
10935 void *vpp;
10936 Elf_Internal_Sym *isym;
10937
10938 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10939 abfd, r_symndx);
10940 if (isym == NULL)
10941 return FALSE;
10942
10943 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10944 if (s == NULL)
10945 s = sec;
10946
10947 vpp = &elf_section_data (s)->local_dynrel;
10948 head = (struct elf32_arm_relocs_copied **) vpp;
10949 }
10950
10951 p = *head;
10952 if (p == NULL || p->section != sec)
10953 {
10954 bfd_size_type amt = sizeof *p;
10955
10956 p = bfd_alloc (htab->root.dynobj, amt);
10957 if (p == NULL)
10958 return FALSE;
10959 p->next = *head;
10960 *head = p;
10961 p->section = sec;
10962 p->count = 0;
10963 p->pc_count = 0;
10964 }
10965
10966 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10967 p->pc_count += 1;
10968 p->count += 1;
10969 }
10970 break;
10971
10972 /* This relocation describes the C++ object vtable hierarchy.
10973 Reconstruct it for later use during GC. */
10974 case R_ARM_GNU_VTINHERIT:
10975 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10976 return FALSE;
10977 break;
10978
10979 /* This relocation describes which C++ vtable entries are actually
10980 used. Record for later use during GC. */
10981 case R_ARM_GNU_VTENTRY:
10982 BFD_ASSERT (h != NULL);
10983 if (h != NULL
10984 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10985 return FALSE;
10986 break;
10987 }
10988 }
10989
10990 return TRUE;
10991 }
10992
10993 /* Unwinding tables are not referenced directly. This pass marks them as
10994 required if the corresponding code section is marked. */
10995
10996 static bfd_boolean
10997 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10998 elf_gc_mark_hook_fn gc_mark_hook)
10999 {
11000 bfd *sub;
11001 Elf_Internal_Shdr **elf_shdrp;
11002 bfd_boolean again;
11003
11004 /* Marking EH data may cause additional code sections to be marked,
11005 requiring multiple passes. */
11006 again = TRUE;
11007 while (again)
11008 {
11009 again = FALSE;
11010 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11011 {
11012 asection *o;
11013
11014 if (! is_arm_elf (sub))
11015 continue;
11016
11017 elf_shdrp = elf_elfsections (sub);
11018 for (o = sub->sections; o != NULL; o = o->next)
11019 {
11020 Elf_Internal_Shdr *hdr;
11021
11022 hdr = &elf_section_data (o)->this_hdr;
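	      /* Per the ARM ELF ABI, an SHT_ARM_EXIDX section's sh_link holds
		 the section header index of the text section it describes;
		 the test below marks the unwind table only once that text
		 section itself has been marked.  */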
11023 if (hdr->sh_type == SHT_ARM_EXIDX
11024 && hdr->sh_link
11025 && hdr->sh_link < elf_numsections (sub)
11026 && !o->gc_mark
11027 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11028 {
11029 again = TRUE;
11030 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11031 return FALSE;
11032 }
11033 }
11034 }
11035 }
11036
11037 return TRUE;
11038 }
11039
11040 /* Treat mapping symbols as special target symbols. */
11041
11042 static bfd_boolean
11043 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11044 {
11045 return bfd_is_arm_special_symbol_name (sym->name,
11046 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11047 }
11048
11049 /* This is a copy of elf_find_function() from elf.c except that
11050 ARM mapping symbols are ignored when looking for function names
11051 and STT_ARM_TFUNC is considered to be a function type. */
11052
11053 static bfd_boolean
11054 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11055 asection * section,
11056 asymbol ** symbols,
11057 bfd_vma offset,
11058 const char ** filename_ptr,
11059 const char ** functionname_ptr)
11060 {
11061 const char * filename = NULL;
11062 asymbol * func = NULL;
11063 bfd_vma low_func = 0;
11064 asymbol ** p;
11065
11066 for (p = symbols; *p != NULL; p++)
11067 {
11068 elf_symbol_type *q;
11069
11070 q = (elf_symbol_type *) *p;
11071
11072 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11073 {
11074 default:
11075 break;
11076 case STT_FILE:
11077 filename = bfd_asymbol_name (&q->symbol);
11078 break;
11079 case STT_FUNC:
11080 case STT_ARM_TFUNC:
11081 case STT_NOTYPE:
11082 /* Skip mapping symbols. */
11083 if ((q->symbol.flags & BSF_LOCAL)
11084 && bfd_is_arm_special_symbol_name (q->symbol.name,
11085 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11086 continue;
11087 /* Fall through. */
11088 if (bfd_get_section (&q->symbol) == section
11089 && q->symbol.value >= low_func
11090 && q->symbol.value <= offset)
11091 {
11092 func = (asymbol *) q;
11093 low_func = q->symbol.value;
11094 }
11095 break;
11096 }
11097 }
11098
11099 if (func == NULL)
11100 return FALSE;
11101
11102 if (filename_ptr)
11103 *filename_ptr = filename;
11104 if (functionname_ptr)
11105 *functionname_ptr = bfd_asymbol_name (func);
11106
11107 return TRUE;
11108 }
11109
11110
11111 /* Find the nearest line to a particular section and offset, for error
11112 reporting. This code is a duplicate of the code in elf.c, except
11113 that it uses arm_elf_find_function. */
11114
11115 static bfd_boolean
11116 elf32_arm_find_nearest_line (bfd * abfd,
11117 asection * section,
11118 asymbol ** symbols,
11119 bfd_vma offset,
11120 const char ** filename_ptr,
11121 const char ** functionname_ptr,
11122 unsigned int * line_ptr)
11123 {
11124 bfd_boolean found = FALSE;
11125
11126 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11127
11128 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11129 filename_ptr, functionname_ptr,
11130 line_ptr, 0,
11131 & elf_tdata (abfd)->dwarf2_find_line_info))
11132 {
11133 if (!*functionname_ptr)
11134 arm_elf_find_function (abfd, section, symbols, offset,
11135 *filename_ptr ? NULL : filename_ptr,
11136 functionname_ptr);
11137
11138 return TRUE;
11139 }
11140
11141 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11142 & found, filename_ptr,
11143 functionname_ptr, line_ptr,
11144 & elf_tdata (abfd)->line_info))
11145 return FALSE;
11146
11147 if (found && (*functionname_ptr || *line_ptr))
11148 return TRUE;
11149
11150 if (symbols == NULL)
11151 return FALSE;
11152
11153 if (! arm_elf_find_function (abfd, section, symbols, offset,
11154 filename_ptr, functionname_ptr))
11155 return FALSE;
11156
11157 *line_ptr = 0;
11158 return TRUE;
11159 }
11160
11161 static bfd_boolean
11162 elf32_arm_find_inliner_info (bfd * abfd,
11163 const char ** filename_ptr,
11164 const char ** functionname_ptr,
11165 unsigned int * line_ptr)
11166 {
11167 bfd_boolean found;
11168 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11169 functionname_ptr, line_ptr,
11170 & elf_tdata (abfd)->dwarf2_find_line_info);
11171 return found;
11172 }
11173
11174 /* Adjust a symbol defined by a dynamic object and referenced by a
11175 regular object. The current definition is in some section of the
11176 dynamic object, but we're not including those sections. We have to
11177 change the definition to something the rest of the link can
11178 understand. */
11179
11180 static bfd_boolean
11181 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11182 struct elf_link_hash_entry * h)
11183 {
11184 bfd * dynobj;
11185 asection * s;
11186 struct elf32_arm_link_hash_entry * eh;
11187 struct elf32_arm_link_hash_table *globals;
11188
11189 globals = elf32_arm_hash_table (info);
11190 dynobj = elf_hash_table (info)->dynobj;
11191
11192 /* Make sure we know what is going on here. */
11193 BFD_ASSERT (dynobj != NULL
11194 && (h->needs_plt
11195 || h->u.weakdef != NULL
11196 || (h->def_dynamic
11197 && h->ref_regular
11198 && !h->def_regular)));
11199
11200 eh = (struct elf32_arm_link_hash_entry *) h;
11201
11202 /* If this is a function, put it in the procedure linkage table. We
11203 will fill in the contents of the procedure linkage table later,
11204 when we know the address of the .got section. */
11205 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11206 || h->needs_plt)
11207 {
11208 if (h->plt.refcount <= 0
11209 || SYMBOL_CALLS_LOCAL (info, h)
11210 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11211 && h->root.type == bfd_link_hash_undefweak))
11212 {
11213 /* This case can occur if we saw a PLT32 reloc in an input
11214 file, but the symbol was never referred to by a dynamic
11215 object, or if all references were garbage collected. In
11216 such a case, we don't actually need to build a procedure
11217 linkage table, and we can just do a PC24 reloc instead. */
11218 h->plt.offset = (bfd_vma) -1;
11219 eh->plt_thumb_refcount = 0;
11220 eh->plt_maybe_thumb_refcount = 0;
11221 h->needs_plt = 0;
11222 }
11223
11224 return TRUE;
11225 }
11226 else
11227 {
11228 /* It's possible that we incorrectly decided a .plt reloc was
11229 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11230 in check_relocs. We can't decide accurately between function
11231          and non-function syms in check_relocs; objects loaded later in
11232 the link may change h->type. So fix it now. */
11233 h->plt.offset = (bfd_vma) -1;
11234 eh->plt_thumb_refcount = 0;
11235 eh->plt_maybe_thumb_refcount = 0;
11236 }
11237
11238 /* If this is a weak symbol, and there is a real definition, the
11239 processor independent code will have arranged for us to see the
11240 real definition first, and we can just use the same value. */
11241 if (h->u.weakdef != NULL)
11242 {
11243 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11244 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11245 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11246 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11247 return TRUE;
11248 }
11249
11250 /* If there are no non-GOT references, we do not need a copy
11251 relocation. */
11252 if (!h->non_got_ref)
11253 return TRUE;
11254
11255 /* This is a reference to a symbol defined by a dynamic object which
11256 is not a function. */
11257
11258 /* If we are creating a shared library, we must presume that the
11259 only references to the symbol are via the global offset table.
11260 For such cases we need not do anything here; the relocations will
11261 be handled correctly by relocate_section. Relocatable executables
11262 can reference data in shared objects directly, so we don't need to
11263 do anything here. */
11264 if (info->shared || globals->root.is_relocatable_executable)
11265 return TRUE;
11266
11267 if (h->size == 0)
11268 {
11269 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11270 h->root.root.string);
11271 return TRUE;
11272 }
11273
11274 /* We must allocate the symbol in our .dynbss section, which will
11275 become part of the .bss section of the executable. There will be
11276 an entry for this symbol in the .dynsym section. The dynamic
11277 object will contain position independent code, so all references
11278 from the dynamic object to this symbol will go through the global
11279 offset table. The dynamic linker will use the .dynsym entry to
11280 determine the address it must put in the global offset table, so
11281 both the dynamic object and the regular object will refer to the
11282 same memory location for the variable. */
11283 s = bfd_get_section_by_name (dynobj, ".dynbss");
11284 BFD_ASSERT (s != NULL);
11285
11286 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11287 copy the initial value out of the dynamic object and into the
11288 runtime process image. We need to remember the offset into the
11289 .rel(a).bss section we are going to use. */
11290 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11291 {
11292 asection *srel;
11293
11294 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11295 BFD_ASSERT (srel != NULL);
11296 srel->size += RELOC_SIZE (globals);
11297 h->needs_copy = 1;
11298 }
11299
11300 return _bfd_elf_adjust_dynamic_copy (h, s);
11301 }
11302
11303 /* Allocate space in .plt, .got and associated reloc sections for
11304 dynamic relocs. */
11305
11306 static bfd_boolean
11307 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11308 {
11309 struct bfd_link_info *info;
11310 struct elf32_arm_link_hash_table *htab;
11311 struct elf32_arm_link_hash_entry *eh;
11312 struct elf32_arm_relocs_copied *p;
11313 bfd_signed_vma thumb_refs;
11314
11315 eh = (struct elf32_arm_link_hash_entry *) h;
11316
11317 if (h->root.type == bfd_link_hash_indirect)
11318 return TRUE;
11319
11320 if (h->root.type == bfd_link_hash_warning)
11321 /* When warning symbols are created, they **replace** the "real"
11322 entry in the hash table, thus we never get to see the real
11323 symbol in a hash traversal. So look at it now. */
11324 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11325
11326 info = (struct bfd_link_info *) inf;
11327 htab = elf32_arm_hash_table (info);
11328
11329 if (htab->root.dynamic_sections_created
11330 && h->plt.refcount > 0)
11331 {
11332 /* Make sure this symbol is output as a dynamic symbol.
11333 Undefined weak syms won't yet be marked as dynamic. */
11334 if (h->dynindx == -1
11335 && !h->forced_local)
11336 {
11337 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11338 return FALSE;
11339 }
11340
11341 if (info->shared
11342 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11343 {
11344 asection *s = htab->splt;
11345
11346 /* If this is the first .plt entry, make room for the special
11347 first entry. */
11348 if (s->size == 0)
11349 s->size += htab->plt_header_size;
11350
11351 h->plt.offset = s->size;
11352
11353 /* If we will insert a Thumb trampoline before this PLT, leave room
11354 for it. */
11355 thumb_refs = eh->plt_thumb_refcount;
11356 if (!htab->use_blx)
11357 thumb_refs += eh->plt_maybe_thumb_refcount;
11358
11359 if (thumb_refs > 0)
11360 {
11361 h->plt.offset += PLT_THUMB_STUB_SIZE;
11362 s->size += PLT_THUMB_STUB_SIZE;
11363 }
11364
11365 /* If this symbol is not defined in a regular file, and we are
11366 not generating a shared library, then set the symbol to this
11367 location in the .plt. This is required to make function
11368 pointers compare as equal between the normal executable and
11369 the shared library. */
11370 if (! info->shared
11371 && !h->def_regular)
11372 {
11373 h->root.u.def.section = s;
11374 h->root.u.def.value = h->plt.offset;
11375 }
11376
11377 /* Make sure the function is not marked as Thumb, in case
11378 it is the target of an ABS32 relocation, which will
11379 point to the PLT entry. */
11380 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11381 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11382
11383 /* Make room for this entry. */
11384 s->size += htab->plt_entry_size;
11385
11386 if (!htab->symbian_p)
11387 {
11388 /* We also need to make an entry in the .got.plt section, which
11389 will be placed in the .got section by the linker script. */
11390 eh->plt_got_offset = htab->sgotplt->size;
11391 htab->sgotplt->size += 4;
11392 }
11393
11394 /* We also need to make an entry in the .rel(a).plt section. */
11395 htab->srelplt->size += RELOC_SIZE (htab);
11396
11397 /* VxWorks executables have a second set of relocations for
11398 each PLT entry. They go in a separate relocation section,
11399 which is processed by the kernel loader. */
11400 if (htab->vxworks_p && !info->shared)
11401 {
11402 /* There is a relocation for the initial PLT entry:
11403 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11404 if (h->plt.offset == htab->plt_header_size)
11405 htab->srelplt2->size += RELOC_SIZE (htab);
11406
11407 /* There are two extra relocations for each subsequent
11408 PLT entry: an R_ARM_32 relocation for the GOT entry,
11409 and an R_ARM_32 relocation for the PLT entry. */
11410 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11411 }
11412 }
11413 else
11414 {
11415 h->plt.offset = (bfd_vma) -1;
11416 h->needs_plt = 0;
11417 }
11418 }
11419 else
11420 {
11421 h->plt.offset = (bfd_vma) -1;
11422 h->needs_plt = 0;
11423 }
11424
11425 if (h->got.refcount > 0)
11426 {
11427 asection *s;
11428 bfd_boolean dyn;
11429 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11430 int indx;
11431
11432 /* Make sure this symbol is output as a dynamic symbol.
11433 Undefined weak syms won't yet be marked as dynamic. */
11434 if (h->dynindx == -1
11435 && !h->forced_local)
11436 {
11437 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11438 return FALSE;
11439 }
11440
11441 if (!htab->symbian_p)
11442 {
11443 s = htab->sgot;
11444 h->got.offset = s->size;
11445
11446 if (tls_type == GOT_UNKNOWN)
11447 abort ();
11448
11449 if (tls_type == GOT_NORMAL)
11450 /* Non-TLS symbols need one GOT slot. */
11451 s->size += 4;
11452 else
11453 {
11454 if (tls_type & GOT_TLS_GD)
11455 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11456 s->size += 8;
11457 if (tls_type & GOT_TLS_IE)
11458 /* R_ARM_TLS_IE32 needs one GOT slot. */
11459 s->size += 4;
11460 }
11461
11462 dyn = htab->root.dynamic_sections_created;
11463
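	  /* Work out whether the dynamic relocations for this GOT entry
	     must carry a dynamic symbol index.  indx stays zero when the
	     symbol resolves locally and the entry can be fixed up without
	     one.  */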
11464 indx = 0;
11465 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11466 && (!info->shared
11467 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11468 indx = h->dynindx;
11469
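	  /* TLS GOT slots need dynamic relocations when linking a shared
	     object or when the symbol is preemptible: R_ARM_TLS_TPOFF32 for
	     an IE slot, R_ARM_TLS_DTPMOD32 for a GD slot, and an extra
	     R_ARM_TLS_DTPOFF32 for a GD slot whose symbol is not local.  */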
11470 if (tls_type != GOT_NORMAL
11471 && (info->shared || indx != 0)
11472 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11473 || h->root.type != bfd_link_hash_undefweak))
11474 {
11475 if (tls_type & GOT_TLS_IE)
11476 htab->srelgot->size += RELOC_SIZE (htab);
11477
11478 if (tls_type & GOT_TLS_GD)
11479 htab->srelgot->size += RELOC_SIZE (htab);
11480
11481 if ((tls_type & GOT_TLS_GD) && indx != 0)
11482 htab->srelgot->size += RELOC_SIZE (htab);
11483 }
11484 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11485 || h->root.type != bfd_link_hash_undefweak)
11486 && (info->shared
11487 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11488 htab->srelgot->size += RELOC_SIZE (htab);
11489 }
11490 }
11491 else
11492 h->got.offset = (bfd_vma) -1;
11493
11494 /* Allocate stubs for exported Thumb functions on v4t. */
11495 if (!htab->use_blx && h->dynindx != -1
11496 && h->def_regular
11497 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11498 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11499 {
11500 struct elf_link_hash_entry * th;
11501 struct bfd_link_hash_entry * bh;
11502 struct elf_link_hash_entry * myh;
11503 char name[1024];
11504 asection *s;
11505 bh = NULL;
11506      /* Create a new symbol to register the real location of the function.  */
11507 s = h->root.u.def.section;
11508 sprintf (name, "__real_%s", h->root.root.string);
11509 _bfd_generic_link_add_one_symbol (info, s->owner,
11510 name, BSF_GLOBAL, s,
11511 h->root.u.def.value,
11512 NULL, TRUE, FALSE, &bh);
11513
11514 myh = (struct elf_link_hash_entry *) bh;
11515 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11516 myh->forced_local = 1;
11517 eh->export_glue = myh;
11518 th = record_arm_to_thumb_glue (info, h);
11519 /* Point the symbol at the stub. */
11520 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11521 h->root.u.def.section = th->root.u.def.section;
11522 h->root.u.def.value = th->root.u.def.value & ~1;
11523 }
11524
11525 if (eh->relocs_copied == NULL)
11526 return TRUE;
11527
11528 /* In the shared -Bsymbolic case, discard space allocated for
11529 dynamic pc-relative relocs against symbols which turn out to be
11530 defined in regular objects. For the normal shared case, discard
11531 space for pc-relative relocs that have become local due to symbol
11532 visibility changes. */
11533
11534 if (info->shared || htab->root.is_relocatable_executable)
11535 {
11536 /* The only relocs that use pc_count are R_ARM_REL32 and
11537 R_ARM_REL32_NOI, which will appear on something like
11538 ".long foo - .". We want calls to protected symbols to resolve
11539 directly to the function rather than going via the plt. If people
11540 want function pointer comparisons to work as expected then they
11541 should avoid writing assembly like ".long foo - .". */
11542 if (SYMBOL_CALLS_LOCAL (info, h))
11543 {
11544 struct elf32_arm_relocs_copied **pp;
11545
11546 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11547 {
11548 p->count -= p->pc_count;
11549 p->pc_count = 0;
11550 if (p->count == 0)
11551 *pp = p->next;
11552 else
11553 pp = &p->next;
11554 }
11555 }
11556
11557 if (elf32_arm_hash_table (info)->vxworks_p)
11558 {
11559 struct elf32_arm_relocs_copied **pp;
11560
11561 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11562 {
11563 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11564 *pp = p->next;
11565 else
11566 pp = &p->next;
11567 }
11568 }
11569
11570 /* Also discard relocs on undefined weak syms with non-default
11571 visibility. */
11572 if (eh->relocs_copied != NULL
11573 && h->root.type == bfd_link_hash_undefweak)
11574 {
11575 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11576 eh->relocs_copied = NULL;
11577
11578 /* Make sure undefined weak symbols are output as a dynamic
11579 symbol in PIEs. */
11580 else if (h->dynindx == -1
11581 && !h->forced_local)
11582 {
11583 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11584 return FALSE;
11585 }
11586 }
11587
11588 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11589 && h->root.type == bfd_link_hash_new)
11590 {
11591 /* Output absolute symbols so that we can create relocations
11592 against them. For normal symbols we output a relocation
11593 against the section that contains them. */
11594 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11595 return FALSE;
11596 }
11597
11598 }
11599 else
11600 {
11601 /* For the non-shared case, discard space for relocs against
11602 symbols which turn out to need copy relocs or are not
11603 dynamic. */
11604
11605 if (!h->non_got_ref
11606 && ((h->def_dynamic
11607 && !h->def_regular)
11608 || (htab->root.dynamic_sections_created
11609 && (h->root.type == bfd_link_hash_undefweak
11610 || h->root.type == bfd_link_hash_undefined))))
11611 {
11612 /* Make sure this symbol is output as a dynamic symbol.
11613 Undefined weak syms won't yet be marked as dynamic. */
11614 if (h->dynindx == -1
11615 && !h->forced_local)
11616 {
11617 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11618 return FALSE;
11619 }
11620
11621 /* If that succeeded, we know we'll be keeping all the
11622 relocs. */
11623 if (h->dynindx != -1)
11624 goto keep;
11625 }
11626
11627 eh->relocs_copied = NULL;
11628
11629 keep: ;
11630 }
11631
11632 /* Finally, allocate space. */
11633 for (p = eh->relocs_copied; p != NULL; p = p->next)
11634 {
11635 asection *sreloc = elf_section_data (p->section)->sreloc;
11636 sreloc->size += p->count * RELOC_SIZE (htab);
11637 }
11638
11639 return TRUE;
11640 }
11641
11642 /* Find any dynamic relocs that apply to read-only sections. */
11643
11644 static bfd_boolean
11645 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11646 {
11647 struct elf32_arm_link_hash_entry * eh;
11648 struct elf32_arm_relocs_copied * p;
11649
11650 if (h->root.type == bfd_link_hash_warning)
11651 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11652
11653 eh = (struct elf32_arm_link_hash_entry *) h;
11654 for (p = eh->relocs_copied; p != NULL; p = p->next)
11655 {
11656 asection *s = p->section;
11657
11658 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11659 {
11660 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11661
11662 info->flags |= DF_TEXTREL;
11663
11664 /* Not an error, just cut short the traversal. */
11665 return FALSE;
11666 }
11667 }
11668 return TRUE;
11669 }
11670
11671 void
11672 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11673 int byteswap_code)
11674 {
11675 struct elf32_arm_link_hash_table *globals;
11676
11677 globals = elf32_arm_hash_table (info);
11678 globals->byteswap_code = byteswap_code;
11679 }
11680
11681 /* Set the sizes of the dynamic sections. */
11682
11683 static bfd_boolean
11684 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11685 struct bfd_link_info * info)
11686 {
11687 bfd * dynobj;
11688 asection * s;
11689 bfd_boolean plt;
11690 bfd_boolean relocs;
11691 bfd *ibfd;
11692 struct elf32_arm_link_hash_table *htab;
11693
11694 htab = elf32_arm_hash_table (info);
11695 dynobj = elf_hash_table (info)->dynobj;
11696 BFD_ASSERT (dynobj != NULL);
11697 check_use_blx (htab);
11698
11699 if (elf_hash_table (info)->dynamic_sections_created)
11700 {
11701 /* Set the contents of the .interp section to the interpreter. */
11702 if (info->executable)
11703 {
11704 s = bfd_get_section_by_name (dynobj, ".interp");
11705 BFD_ASSERT (s != NULL);
11706 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11707 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11708 }
11709 }
11710
11711 /* Set up .got offsets for local syms, and space for local dynamic
11712 relocs. */
11713 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11714 {
11715 bfd_signed_vma *local_got;
11716 bfd_signed_vma *end_local_got;
11717 char *local_tls_type;
11718 bfd_size_type locsymcount;
11719 Elf_Internal_Shdr *symtab_hdr;
11720 asection *srel;
11721 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11722
11723 if (! is_arm_elf (ibfd))
11724 continue;
11725
11726 for (s = ibfd->sections; s != NULL; s = s->next)
11727 {
11728 struct elf32_arm_relocs_copied *p;
11729
11730 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11731 {
11732 if (!bfd_is_abs_section (p->section)
11733 && bfd_is_abs_section (p->section->output_section))
11734 {
11735 /* Input section has been discarded, either because
11736 it is a copy of a linkonce section or due to
11737 linker script /DISCARD/, so we'll be discarding
11738 the relocs too. */
11739 }
11740 else if (is_vxworks
11741 && strcmp (p->section->output_section->name,
11742 ".tls_vars") == 0)
11743 {
11744 /* Relocations in vxworks .tls_vars sections are
11745 handled specially by the loader. */
11746 }
11747 else if (p->count != 0)
11748 {
11749 srel = elf_section_data (p->section)->sreloc;
11750 srel->size += p->count * RELOC_SIZE (htab);
11751 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11752 info->flags |= DF_TEXTREL;
11753 }
11754 }
11755 }
11756
11757 local_got = elf_local_got_refcounts (ibfd);
11758 if (!local_got)
11759 continue;
11760
11761 symtab_hdr = & elf_symtab_hdr (ibfd);
11762 locsymcount = symtab_hdr->sh_info;
11763 end_local_got = local_got + locsymcount;
11764 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11765 s = htab->sgot;
11766 srel = htab->srelgot;
11767 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11768 {
11769 if (*local_got > 0)
11770 {
11771 *local_got = s->size;
11772 if (*local_tls_type & GOT_TLS_GD)
11773 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11774 s->size += 8;
11775 if (*local_tls_type & GOT_TLS_IE)
11776 s->size += 4;
11777 if (*local_tls_type == GOT_NORMAL)
11778 s->size += 4;
11779
11780 if (info->shared || *local_tls_type == GOT_TLS_GD)
11781 srel->size += RELOC_SIZE (htab);
11782 }
11783 else
11784 *local_got = (bfd_vma) -1;
11785 }
11786 }
11787
11788 if (htab->tls_ldm_got.refcount > 0)
11789 {
11790 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11791 for R_ARM_TLS_LDM32 relocations. */
11792 htab->tls_ldm_got.offset = htab->sgot->size;
11793 htab->sgot->size += 8;
11794 if (info->shared)
11795 htab->srelgot->size += RELOC_SIZE (htab);
11796 }
11797 else
11798 htab->tls_ldm_got.offset = -1;
11799
11800 /* Allocate global sym .plt and .got entries, and space for global
11801 sym dynamic relocs. */
11802 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11803
11804 /* Here we rummage through the found bfds to collect glue information. */
11805 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11806 {
11807 if (! is_arm_elf (ibfd))
11808 continue;
11809
11810 /* Initialise mapping tables for code/data. */
11811 bfd_elf32_arm_init_maps (ibfd);
11812
11813 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11814 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11815 /* xgettext:c-format */
11816 _bfd_error_handler (_("Errors encountered processing file %s"),
11817 ibfd->filename);
11818 }
11819
11820 /* Allocate space for the glue sections now that we've sized them. */
11821 bfd_elf32_arm_allocate_interworking_sections (info);
11822
11823 /* The check_relocs and adjust_dynamic_symbol entry points have
11824 determined the sizes of the various dynamic sections. Allocate
11825 memory for them. */
11826 plt = FALSE;
11827 relocs = FALSE;
11828 for (s = dynobj->sections; s != NULL; s = s->next)
11829 {
11830 const char * name;
11831
11832 if ((s->flags & SEC_LINKER_CREATED) == 0)
11833 continue;
11834
11835 /* It's OK to base decisions on the section name, because none
11836 of the dynobj section names depend upon the input files. */
11837 name = bfd_get_section_name (dynobj, s);
11838
11839 if (strcmp (name, ".plt") == 0)
11840 {
11841 /* Remember whether there is a PLT. */
11842 plt = s->size != 0;
11843 }
11844 else if (CONST_STRNEQ (name, ".rel"))
11845 {
11846 if (s->size != 0)
11847 {
11848 /* Remember whether there are any reloc sections other
11849 than .rel(a).plt and .rela.plt.unloaded. */
11850 if (s != htab->srelplt && s != htab->srelplt2)
11851 relocs = TRUE;
11852
11853 /* We use the reloc_count field as a counter if we need
11854 to copy relocs into the output file. */
11855 s->reloc_count = 0;
11856 }
11857 }
11858 else if (! CONST_STRNEQ (name, ".got")
11859 && strcmp (name, ".dynbss") != 0)
11860 {
11861 /* It's not one of our sections, so don't allocate space. */
11862 continue;
11863 }
11864
11865 if (s->size == 0)
11866 {
11867 /* If we don't need this section, strip it from the
11868 output file. This is mostly to handle .rel(a).bss and
11869 .rel(a).plt. We must create both sections in
11870 create_dynamic_sections, because they must be created
11871 before the linker maps input sections to output
11872 sections. The linker does that before
11873 adjust_dynamic_symbol is called, and it is that
11874 function which decides whether anything needs to go
11875 into these sections. */
11876 s->flags |= SEC_EXCLUDE;
11877 continue;
11878 }
11879
11880 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11881 continue;
11882
11883 /* Allocate memory for the section contents. */
11884 s->contents = bfd_zalloc (dynobj, s->size);
11885 if (s->contents == NULL)
11886 return FALSE;
11887 }
11888
11889 if (elf_hash_table (info)->dynamic_sections_created)
11890 {
11891 /* Add some entries to the .dynamic section. We fill in the
11892 values later, in elf32_arm_finish_dynamic_sections, but we
11893 must add the entries now so that we get the correct size for
11894 the .dynamic section. The DT_DEBUG entry is filled in by the
11895 dynamic linker and used by the debugger. */
11896 #define add_dynamic_entry(TAG, VAL) \
11897 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11898
11899 if (info->executable)
11900 {
11901 if (!add_dynamic_entry (DT_DEBUG, 0))
11902 return FALSE;
11903 }
11904
11905 if (plt)
11906 {
11907 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11908 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11909 || !add_dynamic_entry (DT_PLTREL,
11910 htab->use_rel ? DT_REL : DT_RELA)
11911 || !add_dynamic_entry (DT_JMPREL, 0))
11912 return FALSE;
11913 }
11914
11915 if (relocs)
11916 {
11917 if (htab->use_rel)
11918 {
11919 if (!add_dynamic_entry (DT_REL, 0)
11920 || !add_dynamic_entry (DT_RELSZ, 0)
11921 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11922 return FALSE;
11923 }
11924 else
11925 {
11926 if (!add_dynamic_entry (DT_RELA, 0)
11927 || !add_dynamic_entry (DT_RELASZ, 0)
11928 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11929 return FALSE;
11930 }
11931 }
11932
11933 /* If any dynamic relocs apply to a read-only section,
11934 then we need a DT_TEXTREL entry. */
11935 if ((info->flags & DF_TEXTREL) == 0)
11936 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11937 info);
11938
11939 if ((info->flags & DF_TEXTREL) != 0)
11940 {
11941 if (!add_dynamic_entry (DT_TEXTREL, 0))
11942 return FALSE;
11943 }
11944 if (htab->vxworks_p
11945 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11946 return FALSE;
11947 }
11948 #undef add_dynamic_entry
11949
11950 return TRUE;
11951 }
11952
11953 /* Finish up dynamic symbol handling. We set the contents of various
11954 dynamic sections here. */
11955
11956 static bfd_boolean
11957 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11958 struct bfd_link_info * info,
11959 struct elf_link_hash_entry * h,
11960 Elf_Internal_Sym * sym)
11961 {
11962 bfd * dynobj;
11963 struct elf32_arm_link_hash_table *htab;
11964 struct elf32_arm_link_hash_entry *eh;
11965
11966 dynobj = elf_hash_table (info)->dynobj;
11967 htab = elf32_arm_hash_table (info);
11968 eh = (struct elf32_arm_link_hash_entry *) h;
11969
11970 if (h->plt.offset != (bfd_vma) -1)
11971 {
11972 asection * splt;
11973 asection * srel;
11974 bfd_byte *loc;
11975 bfd_vma plt_index;
11976 Elf_Internal_Rela rel;
11977
11978 /* This symbol has an entry in the procedure linkage table. Set
11979 it up. */
11980
11981 BFD_ASSERT (h->dynindx != -1);
11982
11983 splt = bfd_get_section_by_name (dynobj, ".plt");
11984 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11985 BFD_ASSERT (splt != NULL && srel != NULL);
11986
11987 /* Fill in the entry in the procedure linkage table. */
11988 if (htab->symbian_p)
11989 {
11990 put_arm_insn (htab, output_bfd,
11991 elf32_arm_symbian_plt_entry[0],
11992 splt->contents + h->plt.offset);
11993 bfd_put_32 (output_bfd,
11994 elf32_arm_symbian_plt_entry[1],
11995 splt->contents + h->plt.offset + 4);
11996
11997 /* Fill in the entry in the .rel.plt section. */
11998 rel.r_offset = (splt->output_section->vma
11999 + splt->output_offset
12000 + h->plt.offset + 4);
12001 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12002
12003 /* Get the index in the procedure linkage table which
12004 corresponds to this symbol. This is the index of this symbol
12005 in all the symbols for which we are making plt entries. The
12006 first entry in the procedure linkage table is reserved. */
12007 plt_index = ((h->plt.offset - htab->plt_header_size)
12008 / htab->plt_entry_size);
12009 }
12010 else
12011 {
12012 bfd_vma got_offset, got_address, plt_address;
12013 bfd_vma got_displacement;
12014 asection * sgot;
12015 bfd_byte * ptr;
12016
12017 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12018 BFD_ASSERT (sgot != NULL);
12019
12020 /* Get the offset into the .got.plt table of the entry that
12021 corresponds to this function. */
12022 got_offset = eh->plt_got_offset;
12023
12024 /* Get the index in the procedure linkage table which
12025 corresponds to this symbol. This is the index of this symbol
12026 in all the symbols for which we are making plt entries. The
12027 first three entries in .got.plt are reserved; after that
12028 symbols appear in the same order as in .plt. */
12029 plt_index = (got_offset - 12) / 4;
12030
12031 /* Calculate the address of the GOT entry. */
12032 got_address = (sgot->output_section->vma
12033 + sgot->output_offset
12034 + got_offset);
12035
12036 /* ...and the address of the PLT entry. */
12037 plt_address = (splt->output_section->vma
12038 + splt->output_offset
12039 + h->plt.offset);
12040
12041 ptr = htab->splt->contents + h->plt.offset;
12042 if (htab->vxworks_p && info->shared)
12043 {
12044 unsigned int i;
12045 bfd_vma val;
12046
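	      /* Copy the VxWorks shared PLT entry template a word at a time.
		 Words 2 and 5 are data (the GOT offset and the byte offset of
		 this entry's reloc within .rel(a).plt) and are written with
		 bfd_put_32; the remaining words are instructions.  */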
12047 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12048 {
12049 val = elf32_arm_vxworks_shared_plt_entry[i];
12050 if (i == 2)
12051 val |= got_address - sgot->output_section->vma;
12052 if (i == 5)
12053 val |= plt_index * RELOC_SIZE (htab);
12054 if (i == 2 || i == 5)
12055 bfd_put_32 (output_bfd, val, ptr);
12056 else
12057 put_arm_insn (htab, output_bfd, val, ptr);
12058 }
12059 }
12060 else if (htab->vxworks_p)
12061 {
12062 unsigned int i;
12063 bfd_vma val;
12064
12065 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12066 {
12067 val = elf32_arm_vxworks_exec_plt_entry[i];
12068 if (i == 2)
12069 val |= got_address;
12070 if (i == 4)
12071 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12072 if (i == 5)
12073 val |= plt_index * RELOC_SIZE (htab);
12074 if (i == 2 || i == 5)
12075 bfd_put_32 (output_bfd, val, ptr);
12076 else
12077 put_arm_insn (htab, output_bfd, val, ptr);
12078 }
12079
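	      /* Each PLT entry owns two .rela.plt.unloaded relocations,
		 which follow the single relocation reserved for the PLT
		 header; hence the index plt_index * 2 + 1.  */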
12080 loc = (htab->srelplt2->contents
12081 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12082
12083 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12084 referencing the GOT for this PLT entry. */
12085 rel.r_offset = plt_address + 8;
12086 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12087 rel.r_addend = got_offset;
12088 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12089 loc += RELOC_SIZE (htab);
12090
12091 /* Create the R_ARM_ABS32 relocation referencing the
12092 beginning of the PLT for this GOT entry. */
12093 rel.r_offset = got_address;
12094 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12095 rel.r_addend = 0;
12096 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12097 }
12098 else
12099 {
12100 bfd_signed_vma thumb_refs;
12101 /* Calculate the displacement between the PLT slot and the
12102 entry in the GOT. The eight-byte offset accounts for the
12103 value produced by adding to pc in the first instruction
12104 of the PLT stub. */
12105 got_displacement = got_address - (plt_address + 8);
12106
12107 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12108
12109 thumb_refs = eh->plt_thumb_refcount;
12110 if (!htab->use_blx)
12111 thumb_refs += eh->plt_maybe_thumb_refcount;
12112
12113 if (thumb_refs > 0)
12114 {
12115 put_thumb_insn (htab, output_bfd,
12116 elf32_arm_plt_thumb_stub[0], ptr - 4);
12117 put_thumb_insn (htab, output_bfd,
12118 elf32_arm_plt_thumb_stub[1], ptr - 2);
12119 }
12120
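	      /* The (at most 28-bit) GOT displacement is split across the
		 immediate fields of the three PLT instructions: bits
		 [27:20], [19:12] and [11:0] respectively.  */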
12121 put_arm_insn (htab, output_bfd,
12122 elf32_arm_plt_entry[0]
12123 | ((got_displacement & 0x0ff00000) >> 20),
12124 ptr + 0);
12125 put_arm_insn (htab, output_bfd,
12126 elf32_arm_plt_entry[1]
12127 | ((got_displacement & 0x000ff000) >> 12),
12128                        ptr + 4);
12129 put_arm_insn (htab, output_bfd,
12130 elf32_arm_plt_entry[2]
12131 | (got_displacement & 0x00000fff),
12132 ptr + 8);
12133 #ifdef FOUR_WORD_PLT
12134 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12135 #endif
12136 }
12137
12138 /* Fill in the entry in the global offset table. */
12139 bfd_put_32 (output_bfd,
12140 (splt->output_section->vma
12141 + splt->output_offset),
12142 sgot->contents + got_offset);
12143
12144 /* Fill in the entry in the .rel(a).plt section. */
12145 rel.r_addend = 0;
12146 rel.r_offset = got_address;
12147 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12148 }
12149
12150 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12151 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12152
12153 if (!h->def_regular)
12154 {
12155 /* Mark the symbol as undefined, rather than as defined in
12156 the .plt section. Leave the value alone. */
12157 sym->st_shndx = SHN_UNDEF;
12158 /* If the symbol is weak, we do need to clear the value.
12159 Otherwise, the PLT entry would provide a definition for
12160 the symbol even if the symbol wasn't defined anywhere,
12161 and so the symbol would never be NULL. */
12162 if (!h->ref_regular_nonweak)
12163 sym->st_value = 0;
12164 }
12165 }
12166
12167 if (h->got.offset != (bfd_vma) -1
12168 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12169 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12170 {
12171 asection * sgot;
12172 asection * srel;
12173 Elf_Internal_Rela rel;
12174 bfd_byte *loc;
12175 bfd_vma offset;
12176
12177 /* This symbol has an entry in the global offset table. Set it
12178 up. */
12179 sgot = bfd_get_section_by_name (dynobj, ".got");
12180 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12181 BFD_ASSERT (sgot != NULL && srel != NULL);
12182
12183 offset = (h->got.offset & ~(bfd_vma) 1);
12184 rel.r_addend = 0;
12185 rel.r_offset = (sgot->output_section->vma
12186 + sgot->output_offset
12187 + offset);
12188
12189 /* If this is a static link, or it is a -Bsymbolic link and the
12190 symbol is defined locally or was forced to be local because
12191 of a version file, we just want to emit a RELATIVE reloc.
12192 The entry in the global offset table will already have been
12193 initialized in the relocate_section function. */
12194 if (info->shared
12195 && SYMBOL_REFERENCES_LOCAL (info, h))
12196 {
12197 BFD_ASSERT ((h->got.offset & 1) != 0);
12198 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12199 if (!htab->use_rel)
12200 {
12201 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12202 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12203 }
12204 }
12205 else
12206 {
12207 BFD_ASSERT ((h->got.offset & 1) == 0);
12208 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12209 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12210 }
12211
12212 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12213 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12214 }
12215
12216 if (h->needs_copy)
12217 {
12218 asection * s;
12219 Elf_Internal_Rela rel;
12220 bfd_byte *loc;
12221
12222 /* This symbol needs a copy reloc. Set it up. */
12223 BFD_ASSERT (h->dynindx != -1
12224 && (h->root.type == bfd_link_hash_defined
12225 || h->root.type == bfd_link_hash_defweak));
12226
12227 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12228 RELOC_SECTION (htab, ".bss"));
12229 BFD_ASSERT (s != NULL);
12230
12231 rel.r_addend = 0;
12232 rel.r_offset = (h->root.u.def.value
12233 + h->root.u.def.section->output_section->vma
12234 + h->root.u.def.section->output_offset);
12235 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12236 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12237 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12238 }
12239
12240 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12241 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12242 to the ".got" section. */
12243 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12244 || (!htab->vxworks_p && h == htab->root.hgot))
12245 sym->st_shndx = SHN_ABS;
12246
12247 return TRUE;
12248 }
12249
12250 /* Finish up the dynamic sections. */
12251
12252 static bfd_boolean
12253 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12254 {
12255 bfd * dynobj;
12256 asection * sgot;
12257 asection * sdyn;
12258
12259 dynobj = elf_hash_table (info)->dynobj;
12260
12261 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12262 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12263 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12264
12265 if (elf_hash_table (info)->dynamic_sections_created)
12266 {
12267 asection *splt;
12268 Elf32_External_Dyn *dyncon, *dynconend;
12269 struct elf32_arm_link_hash_table *htab;
12270
12271 htab = elf32_arm_hash_table (info);
12272 splt = bfd_get_section_by_name (dynobj, ".plt");
12273 BFD_ASSERT (splt != NULL && sdyn != NULL);
12274
12275 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12276 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12277
12278 for (; dyncon < dynconend; dyncon++)
12279 {
12280 Elf_Internal_Dyn dyn;
12281 const char * name;
12282 asection * s;
12283
12284 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12285
12286 switch (dyn.d_tag)
12287 {
12288 unsigned int type;
12289
12290 default:
12291 if (htab->vxworks_p
12292 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12293 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12294 break;
12295
12296 case DT_HASH:
12297 name = ".hash";
12298 goto get_vma_if_bpabi;
12299 case DT_STRTAB:
12300 name = ".dynstr";
12301 goto get_vma_if_bpabi;
12302 case DT_SYMTAB:
12303 name = ".dynsym";
12304 goto get_vma_if_bpabi;
12305 case DT_VERSYM:
12306 name = ".gnu.version";
12307 goto get_vma_if_bpabi;
12308 case DT_VERDEF:
12309 name = ".gnu.version_d";
12310 goto get_vma_if_bpabi;
12311 case DT_VERNEED:
12312 name = ".gnu.version_r";
12313 goto get_vma_if_bpabi;
12314
12315 case DT_PLTGOT:
12316 name = ".got";
12317 goto get_vma;
12318 case DT_JMPREL:
12319 name = RELOC_SECTION (htab, ".plt");
12320 get_vma:
12321 s = bfd_get_section_by_name (output_bfd, name);
12322 BFD_ASSERT (s != NULL);
12323 if (!htab->symbian_p)
12324 dyn.d_un.d_ptr = s->vma;
12325 else
12326 /* In the BPABI, tags in the PT_DYNAMIC section point
12327 at the file offset, not the memory address, for the
12328 convenience of the post linker. */
12329 dyn.d_un.d_ptr = s->filepos;
12330 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12331 break;
12332
12333 get_vma_if_bpabi:
12334 if (htab->symbian_p)
12335 goto get_vma;
12336 break;
12337
12338 case DT_PLTRELSZ:
12339 s = bfd_get_section_by_name (output_bfd,
12340 RELOC_SECTION (htab, ".plt"));
12341 BFD_ASSERT (s != NULL);
12342 dyn.d_un.d_val = s->size;
12343 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12344 break;
12345
12346 case DT_RELSZ:
12347 case DT_RELASZ:
12348 if (!htab->symbian_p)
12349 {
12350 /* My reading of the SVR4 ABI indicates that the
12351 procedure linkage table relocs (DT_JMPREL) should be
12352 included in the overall relocs (DT_REL). This is
12353 what Solaris does. However, UnixWare can not handle
12354 that case. Therefore, we override the DT_RELSZ entry
12355 here to make it not include the JMPREL relocs. Since
12356 the linker script arranges for .rel(a).plt to follow all
12357 other relocation sections, we don't have to worry
12358 about changing the DT_REL entry. */
12359 s = bfd_get_section_by_name (output_bfd,
12360 RELOC_SECTION (htab, ".plt"));
12361 if (s != NULL)
12362 dyn.d_un.d_val -= s->size;
12363 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12364 break;
12365 }
12366 /* Fall through. */
12367
12368 case DT_REL:
12369 case DT_RELA:
12370 /* In the BPABI, the DT_REL tag must point at the file
12371 offset, not the VMA, of the first relocation
12372 section. So, we use code similar to that in
12373 elflink.c, but do not check for SHF_ALLOC on the
12374                relocation section, since relocation sections are
12375                never allocated under the BPABI.  The comments above
12376                about UnixWare notwithstanding, we include all of the
12377 relocations here. */
12378 if (htab->symbian_p)
12379 {
12380 unsigned int i;
12381 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12382 ? SHT_REL : SHT_RELA);
12383 dyn.d_un.d_val = 0;
12384 for (i = 1; i < elf_numsections (output_bfd); i++)
12385 {
12386 Elf_Internal_Shdr *hdr
12387 = elf_elfsections (output_bfd)[i];
12388 if (hdr->sh_type == type)
12389 {
12390 if (dyn.d_tag == DT_RELSZ
12391 || dyn.d_tag == DT_RELASZ)
12392 dyn.d_un.d_val += hdr->sh_size;
12393 else if ((ufile_ptr) hdr->sh_offset
12394 <= dyn.d_un.d_val - 1)
12395 dyn.d_un.d_val = hdr->sh_offset;
12396 }
12397 }
12398 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12399 }
12400 break;
12401
12402 /* Set the bottom bit of DT_INIT/FINI if the
12403 corresponding function is Thumb. */
12404 case DT_INIT:
12405 name = info->init_function;
12406 goto get_sym;
12407 case DT_FINI:
12408 name = info->fini_function;
12409 get_sym:
12410 /* If it wasn't set by elf_bfd_final_link
12411 then there is nothing to adjust. */
12412 if (dyn.d_un.d_val != 0)
12413 {
12414 struct elf_link_hash_entry * eh;
12415
12416 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12417 FALSE, FALSE, TRUE);
12418 if (eh != NULL
12419 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12420 {
12421 dyn.d_un.d_val |= 1;
12422 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12423 }
12424 }
12425 break;
12426 }
12427 }
12428
12429 /* Fill in the first entry in the procedure linkage table. */
12430 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12431 {
12432 const bfd_vma *plt0_entry;
12433 bfd_vma got_address, plt_address, got_displacement;
12434
12435 /* Calculate the addresses of the GOT and PLT. */
12436 got_address = sgot->output_section->vma + sgot->output_offset;
12437 plt_address = splt->output_section->vma + splt->output_offset;
12438
12439 if (htab->vxworks_p)
12440 {
12441 /* The VxWorks GOT is relocated by the dynamic linker.
12442 Therefore, we must emit relocations rather than simply
12443 computing the values now. */
12444 Elf_Internal_Rela rel;
12445
12446 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12447 put_arm_insn (htab, output_bfd, plt0_entry[0],
12448 splt->contents + 0);
12449 put_arm_insn (htab, output_bfd, plt0_entry[1],
12450 splt->contents + 4);
12451 put_arm_insn (htab, output_bfd, plt0_entry[2],
12452 splt->contents + 8);
12453 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12454
12455 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12456 rel.r_offset = plt_address + 12;
12457 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12458 rel.r_addend = 0;
12459 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12460 htab->srelplt2->contents);
12461 }
12462 else
12463 {
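	      /* The add instruction at offset 8 of the PLT header reads the
		 pc as .plt + 16, so the displacement stored in the following
		 word must be &GOT - (.plt + 16).  */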
12464 got_displacement = got_address - (plt_address + 16);
12465
12466 plt0_entry = elf32_arm_plt0_entry;
12467 put_arm_insn (htab, output_bfd, plt0_entry[0],
12468 splt->contents + 0);
12469 put_arm_insn (htab, output_bfd, plt0_entry[1],
12470 splt->contents + 4);
12471 put_arm_insn (htab, output_bfd, plt0_entry[2],
12472 splt->contents + 8);
12473 put_arm_insn (htab, output_bfd, plt0_entry[3],
12474 splt->contents + 12);
12475
12476 #ifdef FOUR_WORD_PLT
12477 /* The displacement value goes in the otherwise-unused
12478 last word of the second entry. */
12479 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12480 #else
12481 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12482 #endif
12483 }
12484 }
12485
12486 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12487 really seem like the right value. */
12488 if (splt->output_section->owner == output_bfd)
12489 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12490
12491 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12492 {
12493 /* Correct the .rel(a).plt.unloaded relocations. They will have
12494 incorrect symbol indexes. */
12495 int num_plts;
12496 unsigned char *p;
12497
12498 num_plts = ((htab->splt->size - htab->plt_header_size)
12499 / htab->plt_entry_size);
12500 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12501
12502 for (; num_plts; num_plts--)
12503 {
12504 Elf_Internal_Rela rel;
12505
12506 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12507 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12508 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12509 p += RELOC_SIZE (htab);
12510
12511 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12512 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12513 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12514 p += RELOC_SIZE (htab);
12515 }
12516 }
12517 }
12518
12519 /* Fill in the first three entries in the global offset table. */
12520 if (sgot)
12521 {
12522 if (sgot->size > 0)
12523 {
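	  /* Following the usual ELF convention, GOT[0] holds the address of
	     the .dynamic section, while GOT[1] and GOT[2] are reserved for
	     the dynamic linker to fill in at run time.  */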
12524 if (sdyn == NULL)
12525 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12526 else
12527 bfd_put_32 (output_bfd,
12528 sdyn->output_section->vma + sdyn->output_offset,
12529 sgot->contents);
12530 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12531 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12532 }
12533
12534 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12535 }
12536
12537 return TRUE;
12538 }
12539
12540 static void
12541 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12542 {
12543 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12544 struct elf32_arm_link_hash_table *globals;
12545
12546 i_ehdrp = elf_elfheader (abfd);
12547
12548 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12549 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12550 else
12551 i_ehdrp->e_ident[EI_OSABI] = 0;
12552 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12553
12554 if (link_info)
12555 {
12556 globals = elf32_arm_hash_table (link_info);
12557 if (globals->byteswap_code)
12558 i_ehdrp->e_flags |= EF_ARM_BE8;
12559 }
12560 }
12561
12562 static enum elf_reloc_type_class
12563 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12564 {
12565 switch ((int) ELF32_R_TYPE (rela->r_info))
12566 {
12567 case R_ARM_RELATIVE:
12568 return reloc_class_relative;
12569 case R_ARM_JUMP_SLOT:
12570 return reloc_class_plt;
12571 case R_ARM_COPY:
12572 return reloc_class_copy;
12573 default:
12574 return reloc_class_normal;
12575 }
12576 }
12577
12578 /* Mark SHT_NOTE sections link-once so that duplicate ARM notes are merged.  */
12579
12580 static bfd_boolean
12581 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12582 {
12583 if (hdr->sh_type == SHT_NOTE)
12584 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12585
12586 return TRUE;
12587 }
12588
12589 static void
12590 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12591 {
12592 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12593 }
12594
12595 /* Return TRUE if this is an unwinding table entry. */
12596
12597 static bfd_boolean
12598 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12599 {
12600 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12601 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12602 }
12603
12604
12605 /* Set the type and flags for an ARM section.  We do this based on
12606    the section name, which is a hack, but ought to work.  */
12607
12608 static bfd_boolean
12609 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12610 {
12611 const char * name;
12612
12613 name = bfd_get_section_name (abfd, sec);
12614
12615 if (is_arm_elf_unwind_section_name (abfd, name))
12616 {
12617 hdr->sh_type = SHT_ARM_EXIDX;
12618 hdr->sh_flags |= SHF_LINK_ORDER;
12619 }
12620 return TRUE;
12621 }
12622
12623 /* Handle an ARM specific section when reading an object file. This is
12624 called when bfd_section_from_shdr finds a section with an unknown
12625 type. */
12626
12627 static bfd_boolean
12628 elf32_arm_section_from_shdr (bfd *abfd,
12629 Elf_Internal_Shdr * hdr,
12630 const char *name,
12631 int shindex)
12632 {
12633 /* There ought to be a place to keep ELF backend specific flags, but
12634 at the moment there isn't one. We just keep track of the
12635 sections by their name, instead. Fortunately, the ABI gives
12636 names for all the ARM specific sections, so we will probably get
12637 away with this. */
12638 switch (hdr->sh_type)
12639 {
12640 case SHT_ARM_EXIDX:
12641 case SHT_ARM_PREEMPTMAP:
12642 case SHT_ARM_ATTRIBUTES:
12643 break;
12644
12645 default:
12646 return FALSE;
12647 }
12648
12649 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12650 return FALSE;
12651
12652 return TRUE;
12653 }
12654
12655 /* A structure used to record a list of sections, independently
12656 of the next and prev fields in the asection structure. */
12657 typedef struct section_list
12658 {
12659 asection * sec;
12660 struct section_list * next;
12661 struct section_list * prev;
12662 }
12663 section_list;
12664
12665 /* Unfortunately we need to keep a list of sections for which
12666 an _arm_elf_section_data structure has been allocated. This
12667 is because it is possible for functions like elf32_arm_write_section
12668    to be called on a section which has had an elf_section_data structure
12669 allocated for it (and so the used_by_bfd field is valid) but
12670 for which the ARM extended version of this structure - the
12671 _arm_elf_section_data structure - has not been allocated. */
12672 static section_list * sections_with_arm_elf_section_data = NULL;
12673
12674 static void
12675 record_section_with_arm_elf_section_data (asection * sec)
12676 {
12677 struct section_list * entry;
12678
12679 entry = bfd_malloc (sizeof (* entry));
12680 if (entry == NULL)
12681 return;
12682 entry->sec = sec;
12683 entry->next = sections_with_arm_elf_section_data;
12684 entry->prev = NULL;
12685 if (entry->next != NULL)
12686 entry->next->prev = entry;
12687 sections_with_arm_elf_section_data = entry;
12688 }
12689
12690 static struct section_list *
12691 find_arm_elf_section_entry (asection * sec)
12692 {
12693 struct section_list * entry;
12694 static struct section_list * last_entry = NULL;
12695
12696 /* This is a short cut for the typical case where the sections are added
12697 to the sections_with_arm_elf_section_data list in forward order and
12698 then looked up here in backwards order. This makes a real difference
12699 to the ld-srec/sec64k.exp linker test. */
12700 entry = sections_with_arm_elf_section_data;
12701 if (last_entry != NULL)
12702 {
12703 if (last_entry->sec == sec)
12704 entry = last_entry;
12705 else if (last_entry->next != NULL
12706 && last_entry->next->sec == sec)
12707 entry = last_entry->next;
12708 }
12709
12710 for (; entry; entry = entry->next)
12711 if (entry->sec == sec)
12712 break;
12713
12714 if (entry)
12715 /* Record the entry prior to this one - it is the entry we are most
12716 likely to want to locate next time. Also this way if we have been
12717 called from unrecord_section_with_arm_elf_section_data() we will not
12718 be caching a pointer that is about to be freed. */
12719 last_entry = entry->prev;
12720
12721 return entry;
12722 }
12723
12724 static _arm_elf_section_data *
12725 get_arm_elf_section_data (asection * sec)
12726 {
12727 struct section_list * entry;
12728
12729 entry = find_arm_elf_section_entry (sec);
12730
12731 if (entry)
12732 return elf32_arm_section_data (entry->sec);
12733 else
12734 return NULL;
12735 }
12736
12737 static void
12738 unrecord_section_with_arm_elf_section_data (asection * sec)
12739 {
12740 struct section_list * entry;
12741
12742 entry = find_arm_elf_section_entry (sec);
12743
12744 if (entry)
12745 {
12746 if (entry->prev != NULL)
12747 entry->prev->next = entry->next;
12748 if (entry->next != NULL)
12749 entry->next->prev = entry->prev;
12750 if (entry == sections_with_arm_elf_section_data)
12751 sections_with_arm_elf_section_data = entry->next;
12752 free (entry);
12753 }
12754 }
12755
12756
12757 typedef struct
12758 {
12759 void *finfo;
12760 struct bfd_link_info *info;
12761 asection *sec;
12762 int sec_shndx;
12763 int (*func) (void *, const char *, Elf_Internal_Sym *,
12764 asection *, struct elf_link_hash_entry *);
12765 } output_arch_syminfo;
12766
12767 enum map_symbol_type
12768 {
12769 ARM_MAP_ARM,
12770 ARM_MAP_THUMB,
12771 ARM_MAP_DATA
12772 };
12773
12774
12775 /* Output a single mapping symbol. */
12776
12777 static bfd_boolean
12778 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12779 enum map_symbol_type type,
12780 bfd_vma offset)
12781 {
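  /* EABI mapping symbol names, indexed by enum map_symbol_type:
     $a marks ARM code, $t Thumb code and $d data.  */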
12782 static const char *names[3] = {"$a", "$t", "$d"};
12783 struct elf32_arm_link_hash_table *htab;
12784 Elf_Internal_Sym sym;
12785
12786 htab = elf32_arm_hash_table (osi->info);
12787 sym.st_value = osi->sec->output_section->vma
12788 + osi->sec->output_offset
12789 + offset;
12790 sym.st_size = 0;
12791 sym.st_other = 0;
12792 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12793 sym.st_shndx = osi->sec_shndx;
12794 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12795 }
12796
12797
12798 /* Output mapping symbols for PLT entries associated with H. */
12799
12800 static bfd_boolean
12801 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12802 {
12803 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12804 struct elf32_arm_link_hash_table *htab;
12805 struct elf32_arm_link_hash_entry *eh;
12806 bfd_vma addr;
12807
12808 htab = elf32_arm_hash_table (osi->info);
12809
12810 if (h->root.type == bfd_link_hash_indirect)
12811 return TRUE;
12812
12813 if (h->root.type == bfd_link_hash_warning)
12814 /* When warning symbols are created, they **replace** the "real"
12815 entry in the hash table, thus we never get to see the real
12816 symbol in a hash traversal. So look at it now. */
12817 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12818
12819 if (h->plt.offset == (bfd_vma) -1)
12820 return TRUE;
12821
12822 eh = (struct elf32_arm_link_hash_entry *) h;
12823 addr = h->plt.offset;
12824 if (htab->symbian_p)
12825 {
12826 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12827 return FALSE;
12828 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12829 return FALSE;
12830 }
12831 else if (htab->vxworks_p)
12832 {
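      /* The VxWorks PLT entry interleaves code and literal data: mark ARM
	 code at offsets 0 and 12 and data words at offsets 8 and 20.  */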
12833 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12834 return FALSE;
12835 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12836 return FALSE;
12837 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12838 return FALSE;
12839 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12840 return FALSE;
12841 }
12842 else
12843 {
12844 bfd_signed_vma thumb_refs;
12845
12846 thumb_refs = eh->plt_thumb_refcount;
12847 if (!htab->use_blx)
12848 thumb_refs += eh->plt_maybe_thumb_refcount;
12849
12850 if (thumb_refs > 0)
12851 {
12852 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12853 return FALSE;
12854 }
12855 #ifdef FOUR_WORD_PLT
12856 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12857 return FALSE;
12858 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12859 return FALSE;
12860 #else
12861      /* A three-word PLT with no Thumb thunk contains only Arm code,
12862         so we only need to output a mapping symbol for the first PLT entry
12863         and for entries with Thumb thunks.  */
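      /* (In this layout the PLT header is 20 bytes, so addr == 20 is the
	 first real PLT entry.)  */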
12864 if (thumb_refs > 0 || addr == 20)
12865 {
12866 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12867 return FALSE;
12868 }
12869 #endif
12870 }
12871
12872 return TRUE;
12873 }
12874
12875 /* Output a single local symbol for a generated stub. */
12876
12877 static bfd_boolean
12878 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12879 bfd_vma offset, bfd_vma size)
12880 {
12881 struct elf32_arm_link_hash_table *htab;
12882 Elf_Internal_Sym sym;
12883
12884 htab = elf32_arm_hash_table (osi->info);
12885 sym.st_value = osi->sec->output_section->vma
12886 + osi->sec->output_offset
12887 + offset;
12888 sym.st_size = size;
12889 sym.st_other = 0;
12890 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12891 sym.st_shndx = osi->sec_shndx;
12892 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12893 }
12894
12895 static bfd_boolean
12896 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12897 void * in_arg)
12898 {
12899 struct elf32_arm_stub_hash_entry *stub_entry;
12900 struct bfd_link_info *info;
12901 struct elf32_arm_link_hash_table *htab;
12902 asection *stub_sec;
12903 bfd_vma addr;
12904 char *stub_name;
12905 output_arch_syminfo *osi;
12906 const insn_sequence *template;
12907 enum stub_insn_type prev_type;
12908 int size;
12909 int i;
12910 enum map_symbol_type sym_type;
12911
12912 /* Massage our args to the form they really have. */
12913 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12914 osi = (output_arch_syminfo *) in_arg;
12915
12916 info = osi->info;
12917
12918 htab = elf32_arm_hash_table (info);
12919 stub_sec = stub_entry->stub_sec;
12920
12921 /* Ensure this stub is attached to the current section being
12922 processed. */
12923 if (stub_sec != osi->sec)
12924 return TRUE;
12925
12926 addr = (bfd_vma) stub_entry->stub_offset;
12927 stub_name = stub_entry->output_name;
12928
12929 template = stub_entry->stub_template;
12930 switch (template[0].type)
12931 {
12932 case ARM_TYPE:
12933 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12934 return FALSE;
12935 break;
12936 case THUMB16_TYPE:
12937 case THUMB32_TYPE:
12938 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12939 stub_entry->stub_size))
12940 return FALSE;
12941 break;
12942 default:
12943 BFD_FAIL ();
12944 return FALSE;
12945 }
12946
12947 prev_type = DATA_TYPE;
12948 size = 0;
12949 for (i = 0; i < stub_entry->stub_template_size; i++)
12950 {
12951 switch (template[i].type)
12952 {
12953 case ARM_TYPE:
12954 sym_type = ARM_MAP_ARM;
12955 break;
12956
12957 case THUMB16_TYPE:
12958 case THUMB32_TYPE:
12959 sym_type = ARM_MAP_THUMB;
12960 break;
12961
12962 case DATA_TYPE:
12963 sym_type = ARM_MAP_DATA;
12964 break;
12965
12966 default:
12967 BFD_FAIL ();
12968 return FALSE;
12969 }
12970
12971 if (template[i].type != prev_type)
12972 {
12973 prev_type = template[i].type;
12974 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12975 return FALSE;
12976 }
12977
12978 switch (template[i].type)
12979 {
12980 case ARM_TYPE:
12981 case THUMB32_TYPE:
12982 size += 4;
12983 break;
12984
12985 case THUMB16_TYPE:
12986 size += 2;
12987 break;
12988
12989 case DATA_TYPE:
12990 size += 4;
12991 break;
12992
12993 default:
12994 BFD_FAIL ();
12995 return FALSE;
12996 }
12997 }
12998
12999 return TRUE;
13000 }
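/* To make the template walk above concrete, consider a hypothetical stub
   template of two Thumb32 instructions followed by one data word:

       i = 0: THUMB32_TYPE  ->  $t emitted at addr + 0, size becomes 4
       i = 1: THUMB32_TYPE  ->  same type, no symbol,   size becomes 8
       i = 2: DATA_TYPE     ->  $d emitted at addr + 8, size becomes 12

   In addition the named stub symbol is emitted with bit 0 set, because
   the template starts in Thumb state.  The template here is illustrative
   only; the real templates are defined earlier in this file.  */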
13001
13002 /* Output mapping symbols for linker generated sections. */
13003
13004 static bfd_boolean
13005 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13006 struct bfd_link_info *info,
13007 void *finfo,
13008 int (*func) (void *, const char *,
13009 Elf_Internal_Sym *,
13010 asection *,
13011 struct elf_link_hash_entry *))
13012 {
13013 output_arch_syminfo osi;
13014 struct elf32_arm_link_hash_table *htab;
13015 bfd_vma offset;
13016 bfd_size_type size;
13017
13018 htab = elf32_arm_hash_table (info);
13019 check_use_blx (htab);
13020
13021 osi.finfo = finfo;
13022 osi.info = info;
13023 osi.func = func;
13024
13025 /* ARM->Thumb glue. */
13026 if (htab->arm_glue_size > 0)
13027 {
13028 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13029 ARM2THUMB_GLUE_SECTION_NAME);
13030
13031 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13032 (output_bfd, osi.sec->output_section);
13033 if (info->shared || htab->root.is_relocatable_executable
13034 || htab->pic_veneer)
13035 size = ARM2THUMB_PIC_GLUE_SIZE;
13036 else if (htab->use_blx)
13037 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13038 else
13039 size = ARM2THUMB_STATIC_GLUE_SIZE;
13040
13041 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13042 {
13043 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13044 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13045 }
13046 }
13047
13048 /* Thumb->ARM glue. */
13049 if (htab->thumb_glue_size > 0)
13050 {
13051 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13052 THUMB2ARM_GLUE_SECTION_NAME);
13053
13054 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13055 (output_bfd, osi.sec->output_section);
13056 size = THUMB2ARM_GLUE_SIZE;
13057
13058 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13059 {
13060 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13061 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13062 }
13063 }
13064
13065 /* ARMv4 BX veneers. */
13066 if (htab->bx_glue_size > 0)
13067 {
13068 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13069 ARM_BX_GLUE_SECTION_NAME);
13070
13071 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13072 (output_bfd, osi.sec->output_section);
13073
13074 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13075 }
13076
13077 /* Long call stubs. */
13078 if (htab->stub_bfd && htab->stub_bfd->sections)
13079 {
13080 asection* stub_sec;
13081
13082 for (stub_sec = htab->stub_bfd->sections;
13083 stub_sec != NULL;
13084 stub_sec = stub_sec->next)
13085 {
13086 /* Ignore non-stub sections. */
13087 if (!strstr (stub_sec->name, STUB_SUFFIX))
13088 continue;
13089
13090 osi.sec = stub_sec;
13091
13092 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13093 (output_bfd, osi.sec->output_section);
13094
13095 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13096 }
13097 }
13098
13099 /* Finally, output mapping symbols for the PLT. */
13100 if (!htab->splt || htab->splt->size == 0)
13101 return TRUE;
13102
13103 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13104 htab->splt->output_section);
13105 osi.sec = htab->splt;
13106 /* Output mapping symbols for the plt header. SymbianOS does not have a
13107 plt header. */
13108 if (htab->vxworks_p)
13109 {
13110 /* VxWorks shared libraries have no PLT header. */
13111 if (!info->shared)
13112 {
13113 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13114 return FALSE;
13115 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13116 return FALSE;
13117 }
13118 }
13119 else if (!htab->symbian_p)
13120 {
13121 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13122 return FALSE;
13123 #ifndef FOUR_WORD_PLT
13124 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13125 return FALSE;
13126 #endif
13127 }
13128
13129 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13130 return TRUE;
13131 }
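/* As an example of the glue handling above (sizes left symbolic): with two
   Arm->Thumb glue entries of SIZE bytes each, the loop emits $a at offsets
   0 and SIZE, and $d at SIZE - 4 and 2 * SIZE - 4, marking the trailing
   data word of each entry.  The Thumb->Arm glue loop is analogous, with $t
   at the start of each entry and $a four bytes in.  */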
13132
13133 /* Allocate target specific section data. */
13134
13135 static bfd_boolean
13136 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13137 {
13138 if (!sec->used_by_bfd)
13139 {
13140 _arm_elf_section_data *sdata;
13141 bfd_size_type amt = sizeof (*sdata);
13142
13143 sdata = bfd_zalloc (abfd, amt);
13144 if (sdata == NULL)
13145 return FALSE;
13146 sec->used_by_bfd = sdata;
13147 }
13148
13149 record_section_with_arm_elf_section_data (sec);
13150
13151 return _bfd_elf_new_section_hook (abfd, sec);
13152 }
13153
13154
13155 /* Used to order a list of mapping symbols by address. */
13156
13157 static int
13158 elf32_arm_compare_mapping (const void * a, const void * b)
13159 {
13160 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13161 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13162
13163 if (amap->vma > bmap->vma)
13164 return 1;
13165 else if (amap->vma < bmap->vma)
13166 return -1;
13167 else if (amap->type > bmap->type)
13168 /* Ensure results do not depend on the host qsort for objects with
13169 multiple mapping symbols at the same address by sorting on type
13170 after vma. */
13171 return 1;
13172 else if (amap->type < bmap->type)
13173 return -1;
13174 else
13175 return 0;
13176 }
13177
13178 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13179
13180 static unsigned long
13181 offset_prel31 (unsigned long addr, bfd_vma offset)
13182 {
13183 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13184 }
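/* For illustration (values invented), the arithmetic above behaves like
   this:

     addr   = 0x80000010   top bit set, prel31 field = 0x10
     offset = 0x20
     offset_prel31 (addr, offset) == 0x80000030

   Any carry out of bit 30 is discarded, which is what a 31-bit
   place-relative field requires.  */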
13185
13186 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13187 relocations. */
13188
13189 static void
13190 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13191 {
13192 unsigned long first_word = bfd_get_32 (output_bfd, from);
13193 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13194
13195 /* High bit of first word is supposed to be zero. */
13196 if ((first_word & 0x80000000ul) == 0)
13197 first_word = offset_prel31 (first_word, offset);
13198
13199 /* If the high bit of the second word is clear, and the word is not 0x1
13200 (EXIDX_CANTUNWIND), it is a prel31 offset to an .ARM.extab entry. */
13201 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13202 second_word = offset_prel31 (second_word, offset);
13203
13204 bfd_put_32 (output_bfd, first_word, to);
13205 bfd_put_32 (output_bfd, second_word, to + 4);
13206 }
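/* Example of the cases handled above: an entry whose second word is 0x1
   (EXIDX_CANTUNWIND) has only its first word rebased by OFFSET; an entry
   whose second word has its high bit clear (a prel31 pointer into
   .ARM.extab) has both words rebased; an entry whose second word has the
   high bit set carries its unwind instructions inline, so that word is
   copied unchanged.  */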
13207
13208 /* Data for make_branch_to_a8_stub(). */
13209
13210 struct a8_branch_to_stub_data {
13211 asection *writing_section;
13212 bfd_byte *contents;
13213 };
13214
13215
13216 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13217 places for a particular section. */
13218
13219 static bfd_boolean
13220 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13221 void *in_arg)
13222 {
13223 struct elf32_arm_stub_hash_entry *stub_entry;
13224 struct a8_branch_to_stub_data *data;
13225 bfd_byte *contents;
13226 unsigned long branch_insn;
13227 bfd_vma veneered_insn_loc, veneer_entry_loc;
13228 bfd_signed_vma branch_offset;
13229 bfd *abfd;
13230 unsigned int index;
13231
13232 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13233 data = (struct a8_branch_to_stub_data *) in_arg;
13234
13235 if (stub_entry->target_section != data->writing_section
13236 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13237 return TRUE;
13238
13239 contents = data->contents;
13240
13241 veneered_insn_loc = stub_entry->target_section->output_section->vma
13242 + stub_entry->target_section->output_offset
13243 + stub_entry->target_value;
13244
13245 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13246 + stub_entry->stub_sec->output_offset
13247 + stub_entry->stub_offset;
13248
13249 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13250 veneered_insn_loc &= ~3u;
13251
13252 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13253
13254 abfd = stub_entry->target_section->owner;
13255 index = stub_entry->target_value;
13256
13257 /* We attempt to avoid this condition by setting stubs_always_after_branch
13258 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13259 This check is just to be on the safe side... */
13260 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13261 {
13262 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13263 "allocated in unsafe location"), abfd);
13264 return FALSE;
13265 }
13266
13267 switch (stub_entry->stub_type)
13268 {
13269 case arm_stub_a8_veneer_b:
13270 case arm_stub_a8_veneer_b_cond:
13271 branch_insn = 0xf0009000;
13272 goto jump24;
13273
13274 case arm_stub_a8_veneer_blx:
13275 branch_insn = 0xf000e800;
13276 goto jump24;
13277
13278 case arm_stub_a8_veneer_bl:
13279 {
13280 unsigned int i1, j1, i2, j2, s;
13281
13282 branch_insn = 0xf000d000;
13283
13284 jump24:
13285 if (branch_offset < -16777216 || branch_offset > 16777214)
13286 {
13287 /* There's not much we can do apart from complain if this
13288 happens. */
13289 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13290 "of range (input file too large)"), abfd);
13291 return FALSE;
13292 }
13293
13294 /* i1 = not(j1 eor s), so:
13295 not i1 = j1 eor s
13296 j1 = (not i1) eor s. */
13297
13298 branch_insn |= (branch_offset >> 1) & 0x7ff;
13299 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13300 i2 = (branch_offset >> 22) & 1;
13301 i1 = (branch_offset >> 23) & 1;
13302 s = (branch_offset >> 24) & 1;
13303 j1 = (!i1) ^ s;
13304 j2 = (!i2) ^ s;
13305 branch_insn |= j2 << 11;
13306 branch_insn |= j1 << 13;
13307 branch_insn |= s << 26;
13308 }
13309 break;
13310
13311 default:
13312 BFD_FAIL ();
13313 return FALSE;
13314 }
13315
13316 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13317 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13318
13319 return TRUE;
13320 }
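/* Worked example of the Thumb-2 branch encoding built above (offset value
   invented): for a BL veneer with branch_offset = +0x1000, imm11 = 0,
   imm10 = 1 and S = I1 = I2 = 0, hence J1 = J2 = 1, giving
   branch_insn = 0xf001f800, written out as the halfwords 0xf001 and
   0xf800.  With every offset bit clear the same construction yields
   0xf000f800, the familiar zero-offset BL encoding.  */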
13321
13322 /* Do code byteswapping. Return FALSE afterwards so that the section is
13323 written out as normal. */
13324
13325 static bfd_boolean
13326 elf32_arm_write_section (bfd *output_bfd,
13327 struct bfd_link_info *link_info,
13328 asection *sec,
13329 bfd_byte *contents)
13330 {
13331 unsigned int mapcount, errcount;
13332 _arm_elf_section_data *arm_data;
13333 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13334 elf32_arm_section_map *map;
13335 elf32_vfp11_erratum_list *errnode;
13336 bfd_vma ptr;
13337 bfd_vma end;
13338 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13339 bfd_byte tmp;
13340 unsigned int i;
13341
13342 /* If this section has not been allocated an _arm_elf_section_data
13343 structure then we cannot record anything. */
13344 arm_data = get_arm_elf_section_data (sec);
13345 if (arm_data == NULL)
13346 return FALSE;
13347
13348 mapcount = arm_data->mapcount;
13349 map = arm_data->map;
13350 errcount = arm_data->erratumcount;
13351
13352 if (errcount != 0)
13353 {
13354 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13355
13356 for (errnode = arm_data->erratumlist; errnode != 0;
13357 errnode = errnode->next)
13358 {
13359 bfd_vma index = errnode->vma - offset;
13360
13361 switch (errnode->type)
13362 {
13363 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13364 {
13365 bfd_vma branch_to_veneer;
13366 /* Original condition code of instruction, plus bit mask for
13367 ARM B instruction. */
13368 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13369 | 0x0a000000;
13370
13371 /* The instruction is before the label. */
13372 index -= 4;
13373
13374 /* Above offset included in -4 below. */
13375 branch_to_veneer = errnode->u.b.veneer->vma
13376 - errnode->vma - 4;
13377
13378 if ((signed) branch_to_veneer < -(1 << 25)
13379 || (signed) branch_to_veneer >= (1 << 25))
13380 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13381 "range"), output_bfd);
13382
13383 insn |= (branch_to_veneer >> 2) & 0xffffff;
13384 contents[endianflip ^ index] = insn & 0xff;
13385 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13386 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13387 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13388 }
13389 break;
13390
13391 case VFP11_ERRATUM_ARM_VENEER:
13392 {
13393 bfd_vma branch_from_veneer;
13394 unsigned int insn;
13395
13396 /* Take size of veneer into account. */
13397 branch_from_veneer = errnode->u.v.branch->vma
13398 - errnode->vma - 12;
13399
13400 if ((signed) branch_from_veneer < -(1 << 25)
13401 || (signed) branch_from_veneer >= (1 << 25))
13402 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13403 "range"), output_bfd);
13404
13405 /* Original instruction. */
13406 insn = errnode->u.v.branch->u.b.vfp_insn;
13407 contents[endianflip ^ index] = insn & 0xff;
13408 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13409 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13410 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13411
13412 /* Branch back to insn after original insn. */
13413 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13414 contents[endianflip ^ (index + 4)] = insn & 0xff;
13415 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13416 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13417 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13418 }
13419 break;
13420
13421 default:
13422 abort ();
13423 }
13424 }
13425 }
13426
13427 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13428 {
13429 arm_unwind_table_edit *edit_node
13430 = arm_data->u.exidx.unwind_edit_list;
13431 /* Now, sec->size is the size of the section we will write. The original
13432 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13433 markers) was sec->rawsize. (If we performed no edits, rawsize will be
13434 zero and we should use size instead.) */
13435 bfd_byte *edited_contents = bfd_malloc (sec->size);
13436 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13437 unsigned int in_index, out_index;
13438 bfd_vma add_to_offsets = 0;
13439
13440 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13441 {
13442 if (edit_node)
13443 {
13444 unsigned int edit_index = edit_node->index;
13445
13446 if (in_index < edit_index && in_index * 8 < input_size)
13447 {
13448 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13449 contents + in_index * 8, add_to_offsets);
13450 out_index++;
13451 in_index++;
13452 }
13453 else if (in_index == edit_index
13454 || (in_index * 8 >= input_size
13455 && edit_index == UINT_MAX))
13456 {
13457 switch (edit_node->type)
13458 {
13459 case DELETE_EXIDX_ENTRY:
13460 in_index++;
13461 add_to_offsets += 8;
13462 break;
13463
13464 case INSERT_EXIDX_CANTUNWIND_AT_END:
13465 {
13466 asection *text_sec = edit_node->linked_section;
13467 bfd_vma text_offset = text_sec->output_section->vma
13468 + text_sec->output_offset
13469 + text_sec->size;
13470 bfd_vma exidx_offset = offset + out_index * 8;
13471 unsigned long prel31_offset;
13472
13473 /* Note: this is meant to be equivalent to an
13474 R_ARM_PREL31 relocation. These synthetic
13475 EXIDX_CANTUNWIND markers are not relocated by the
13476 usual BFD method. */
13477 prel31_offset = (text_offset - exidx_offset)
13478 & 0x7ffffffful;
13479
13480 /* First address we can't unwind. */
13481 bfd_put_32 (output_bfd, prel31_offset,
13482 &edited_contents[out_index * 8]);
13483
13484 /* Code for EXIDX_CANTUNWIND. */
13485 bfd_put_32 (output_bfd, 0x1,
13486 &edited_contents[out_index * 8 + 4]);
13487
13488 out_index++;
13489 add_to_offsets -= 8;
13490 }
13491 break;
13492 }
13493
13494 edit_node = edit_node->next;
13495 }
13496 }
13497 else
13498 {
13499 /* No more edits, copy remaining entries verbatim. */
13500 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13501 contents + in_index * 8, add_to_offsets);
13502 out_index++;
13503 in_index++;
13504 }
13505 }
13506
13507 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13508 bfd_set_section_contents (output_bfd, sec->output_section,
13509 edited_contents,
13510 (file_ptr) sec->output_offset, sec->size);
13511
13512 return TRUE;
13513 }
13514
13515 /* Fix code to point to Cortex-A8 erratum stubs. */
13516 if (globals->fix_cortex_a8)
13517 {
13518 struct a8_branch_to_stub_data data;
13519
13520 data.writing_section = sec;
13521 data.contents = contents;
13522
13523 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13524 &data);
13525 }
13526
13527 if (mapcount == 0)
13528 return FALSE;
13529
13530 if (globals->byteswap_code)
13531 {
13532 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13533
13534 ptr = map[0].vma;
13535 for (i = 0; i < mapcount; i++)
13536 {
13537 if (i == mapcount - 1)
13538 end = sec->size;
13539 else
13540 end = map[i + 1].vma;
13541
13542 switch (map[i].type)
13543 {
13544 case 'a':
13545 /* Byte swap code words. */
13546 while (ptr + 3 < end)
13547 {
13548 tmp = contents[ptr];
13549 contents[ptr] = contents[ptr + 3];
13550 contents[ptr + 3] = tmp;
13551 tmp = contents[ptr + 1];
13552 contents[ptr + 1] = contents[ptr + 2];
13553 contents[ptr + 2] = tmp;
13554 ptr += 4;
13555 }
13556 break;
13557
13558 case 't':
13559 /* Byte swap code halfwords. */
13560 while (ptr + 1 < end)
13561 {
13562 tmp = contents[ptr];
13563 contents[ptr] = contents[ptr + 1];
13564 contents[ptr + 1] = tmp;
13565 ptr += 2;
13566 }
13567 break;
13568
13569 case 'd':
13570 /* Leave data alone. */
13571 break;
13572 }
13573 ptr = end;
13574 }
13575 }
13576
13577 free (map);
13578 arm_data->mapcount = 0;
13579 arm_data->mapsize = 0;
13580 arm_data->map = NULL;
13581 unrecord_section_with_arm_elf_section_data (sec);
13582
13583 return FALSE;
13584 }
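/* To illustrate the byteswapping loop in elf32_arm_write_section: within
   an Arm ('$a') region the word bytes 01 02 03 04 become 04 03 02 01,
   while within a Thumb ('$t') region each halfword is swapped separately,
   so 01 02 03 04 become 02 01 04 03.  Data ('$d') regions are left alone,
   which is why the sorted mapping symbols are needed to drive the
   transformation.  */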
13585
13586 static void
13587 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13588 asection * sec,
13589 void * ignore ATTRIBUTE_UNUSED)
13590 {
13591 unrecord_section_with_arm_elf_section_data (sec);
13592 }
13593
13594 static bfd_boolean
13595 elf32_arm_close_and_cleanup (bfd * abfd)
13596 {
13597 if (abfd->sections)
13598 bfd_map_over_sections (abfd,
13599 unrecord_section_via_map_over_sections,
13600 NULL);
13601
13602 return _bfd_elf_close_and_cleanup (abfd);
13603 }
13604
13605 static bfd_boolean
13606 elf32_arm_bfd_free_cached_info (bfd * abfd)
13607 {
13608 if (abfd->sections)
13609 bfd_map_over_sections (abfd,
13610 unrecord_section_via_map_over_sections,
13611 NULL);
13612
13613 return _bfd_free_cached_info (abfd);
13614 }
13615
13616 /* Display STT_ARM_TFUNC symbols as functions. */
13617
13618 static void
13619 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13620 asymbol *asym)
13621 {
13622 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13623
13624 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13625 elfsym->symbol.flags |= BSF_FUNCTION;
13626 }
13627
13628
13629 /* Mangle thumb function symbols as we read them in. */
13630
13631 static bfd_boolean
13632 elf32_arm_swap_symbol_in (bfd * abfd,
13633 const void *psrc,
13634 const void *pshn,
13635 Elf_Internal_Sym *dst)
13636 {
13637 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13638 return FALSE;
13639
13640 /* New EABI objects mark thumb function symbols by setting the low bit of
13641 the address. Turn these into STT_ARM_TFUNC. */
13642 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13643 && (dst->st_value & 1))
13644 {
13645 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13646 dst->st_value &= ~(bfd_vma) 1;
13647 }
13648 return TRUE;
13649 }
13650
13651
13652 /* Mangle thumb function symbols as we write them out. */
13653
13654 static void
13655 elf32_arm_swap_symbol_out (bfd *abfd,
13656 const Elf_Internal_Sym *src,
13657 void *cdst,
13658 void *shndx)
13659 {
13660 Elf_Internal_Sym newsym;
13661
13662 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13663 of the address set, as per the new EABI. We do this unconditionally
13664 because objcopy does not set the elf header flags until after
13665 it writes out the symbol table. */
13666 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13667 {
13668 newsym = *src;
13669 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13670 if (newsym.st_shndx != SHN_UNDEF)
13671 {
13672 /* Do this only for defined symbols. At link time, the static
13673 linker simulates the dynamic linker's work of resolving symbols
13674 and carries over the Thumb-ness of the symbols it finds to the
13675 output symbol table. The Thumb-ness of undefined symbols,
13676 however, may well be different at runtime, and writing '1' for
13677 them would be confusing for users and possibly for the dynamic
13678 linker itself.
13679 */
13680 newsym.st_value |= 1;
13681 }
13682
13683 src = &newsym;
13684 }
13685 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13686 }
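/* Round-trip example for the two routines above, using an invented symbol:
   a defined Thumb function written by the assembler as STT_FUNC with
   st_value 0x8001 is read in as STT_ARM_TFUNC with st_value 0x8000, and is
   written back out as STT_FUNC with st_value 0x8001.  An undefined
   STT_ARM_TFUNC symbol keeps an even st_value on output, for the reason
   given in the comment above.  */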
13687
13688 /* Add the PT_ARM_EXIDX program header. */
13689
13690 static bfd_boolean
13691 elf32_arm_modify_segment_map (bfd *abfd,
13692 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13693 {
13694 struct elf_segment_map *m;
13695 asection *sec;
13696
13697 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13698 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13699 {
13700 /* If there is already a PT_ARM_EXIDX header, then we do not
13701 want to add another one. This situation arises when running
13702 "strip"; the input binary already has the header. */
13703 m = elf_tdata (abfd)->segment_map;
13704 while (m && m->p_type != PT_ARM_EXIDX)
13705 m = m->next;
13706 if (!m)
13707 {
13708 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13709 if (m == NULL)
13710 return FALSE;
13711 m->p_type = PT_ARM_EXIDX;
13712 m->count = 1;
13713 m->sections[0] = sec;
13714
13715 m->next = elf_tdata (abfd)->segment_map;
13716 elf_tdata (abfd)->segment_map = m;
13717 }
13718 }
13719
13720 return TRUE;
13721 }
13722
13723 /* We may add a PT_ARM_EXIDX program header. */
13724
13725 static int
13726 elf32_arm_additional_program_headers (bfd *abfd,
13727 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13728 {
13729 asection *sec;
13730
13731 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13732 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13733 return 1;
13734 else
13735 return 0;
13736 }
13737
13738 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13739
13740 static bfd_boolean
13741 elf32_arm_is_function_type (unsigned int type)
13742 {
13743 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13744 }
13745
13746 /* We use this to override swap_symbol_in and swap_symbol_out. */
13747 const struct elf_size_info elf32_arm_size_info =
13748 {
13749 sizeof (Elf32_External_Ehdr),
13750 sizeof (Elf32_External_Phdr),
13751 sizeof (Elf32_External_Shdr),
13752 sizeof (Elf32_External_Rel),
13753 sizeof (Elf32_External_Rela),
13754 sizeof (Elf32_External_Sym),
13755 sizeof (Elf32_External_Dyn),
13756 sizeof (Elf_External_Note),
13757 4,
13758 1,
13759 32, 2,
13760 ELFCLASS32, EV_CURRENT,
13761 bfd_elf32_write_out_phdrs,
13762 bfd_elf32_write_shdrs_and_ehdr,
13763 bfd_elf32_checksum_contents,
13764 bfd_elf32_write_relocs,
13765 elf32_arm_swap_symbol_in,
13766 elf32_arm_swap_symbol_out,
13767 bfd_elf32_slurp_reloc_table,
13768 bfd_elf32_slurp_symbol_table,
13769 bfd_elf32_swap_dyn_in,
13770 bfd_elf32_swap_dyn_out,
13771 bfd_elf32_swap_reloc_in,
13772 bfd_elf32_swap_reloc_out,
13773 bfd_elf32_swap_reloca_in,
13774 bfd_elf32_swap_reloca_out
13775 };
13776
13777 #define ELF_ARCH bfd_arch_arm
13778 #define ELF_MACHINE_CODE EM_ARM
13779 #ifdef __QNXTARGET__
13780 #define ELF_MAXPAGESIZE 0x1000
13781 #else
13782 #define ELF_MAXPAGESIZE 0x8000
13783 #endif
13784 #define ELF_MINPAGESIZE 0x1000
13785 #define ELF_COMMONPAGESIZE 0x1000
13786
13787 #define bfd_elf32_mkobject elf32_arm_mkobject
13788
13789 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13790 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13791 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13792 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13793 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13794 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13795 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13796 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13797 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13798 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13799 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13800 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13801 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13802 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13803 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13804
13805 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13806 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13807 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13808 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13809 #define elf_backend_check_relocs elf32_arm_check_relocs
13810 #define elf_backend_relocate_section elf32_arm_relocate_section
13811 #define elf_backend_write_section elf32_arm_write_section
13812 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13813 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13814 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13815 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13816 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13817 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13818 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13819 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13820 #define elf_backend_object_p elf32_arm_object_p
13821 #define elf_backend_section_flags elf32_arm_section_flags
13822 #define elf_backend_fake_sections elf32_arm_fake_sections
13823 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13824 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13825 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13826 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13827 #define elf_backend_size_info elf32_arm_size_info
13828 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13829 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13830 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13831 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13832 #define elf_backend_is_function_type elf32_arm_is_function_type
13833
13834 #define elf_backend_can_refcount 1
13835 #define elf_backend_can_gc_sections 1
13836 #define elf_backend_plt_readonly 1
13837 #define elf_backend_want_got_plt 1
13838 #define elf_backend_want_plt_sym 0
13839 #define elf_backend_may_use_rel_p 1
13840 #define elf_backend_may_use_rela_p 0
13841 #define elf_backend_default_use_rela_p 0
13842
13843 #define elf_backend_got_header_size 12
13844
13845 #undef elf_backend_obj_attrs_vendor
13846 #define elf_backend_obj_attrs_vendor "aeabi"
13847 #undef elf_backend_obj_attrs_section
13848 #define elf_backend_obj_attrs_section ".ARM.attributes"
13849 #undef elf_backend_obj_attrs_arg_type
13850 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13851 #undef elf_backend_obj_attrs_section_type
13852 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13853 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13854
13855 #include "elf32-target.h"
13856
13857 /* VxWorks Targets. */
13858
13859 #undef TARGET_LITTLE_SYM
13860 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13861 #undef TARGET_LITTLE_NAME
13862 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13863 #undef TARGET_BIG_SYM
13864 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13865 #undef TARGET_BIG_NAME
13866 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13867
13868 /* Like elf32_arm_link_hash_table_create -- but overrides
13869 appropriately for VxWorks. */
13870
13871 static struct bfd_link_hash_table *
13872 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13873 {
13874 struct bfd_link_hash_table *ret;
13875
13876 ret = elf32_arm_link_hash_table_create (abfd);
13877 if (ret)
13878 {
13879 struct elf32_arm_link_hash_table *htab
13880 = (struct elf32_arm_link_hash_table *) ret;
13881 htab->use_rel = 0;
13882 htab->vxworks_p = 1;
13883 }
13884 return ret;
13885 }
13886
13887 static void
13888 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13889 {
13890 elf32_arm_final_write_processing (abfd, linker);
13891 elf_vxworks_final_write_processing (abfd, linker);
13892 }
13893
13894 #undef elf32_bed
13895 #define elf32_bed elf32_arm_vxworks_bed
13896
13897 #undef bfd_elf32_bfd_link_hash_table_create
13898 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13899 #undef elf_backend_add_symbol_hook
13900 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13901 #undef elf_backend_final_write_processing
13902 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13903 #undef elf_backend_emit_relocs
13904 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13905
13906 #undef elf_backend_may_use_rel_p
13907 #define elf_backend_may_use_rel_p 0
13908 #undef elf_backend_may_use_rela_p
13909 #define elf_backend_may_use_rela_p 1
13910 #undef elf_backend_default_use_rela_p
13911 #define elf_backend_default_use_rela_p 1
13912 #undef elf_backend_want_plt_sym
13913 #define elf_backend_want_plt_sym 1
13914 #undef ELF_MAXPAGESIZE
13915 #define ELF_MAXPAGESIZE 0x1000
13916
13917 #include "elf32-target.h"
13918
13919
13920 /* Symbian OS Targets. */
13921
13922 #undef TARGET_LITTLE_SYM
13923 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13924 #undef TARGET_LITTLE_NAME
13925 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13926 #undef TARGET_BIG_SYM
13927 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13928 #undef TARGET_BIG_NAME
13929 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13930
13931 /* Like elf32_arm_link_hash_table_create -- but overrides
13932 appropriately for Symbian OS. */
13933
13934 static struct bfd_link_hash_table *
13935 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13936 {
13937 struct bfd_link_hash_table *ret;
13938
13939 ret = elf32_arm_link_hash_table_create (abfd);
13940 if (ret)
13941 {
13942 struct elf32_arm_link_hash_table *htab
13943 = (struct elf32_arm_link_hash_table *)ret;
13944 /* There is no PLT header for Symbian OS. */
13945 htab->plt_header_size = 0;
13946 /* The PLT entries are each one instruction and one word. */
13947 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13948 htab->symbian_p = 1;
13949 /* Symbian uses armv5t or above, so use_blx is always true. */
13950 htab->use_blx = 1;
13951 htab->root.is_relocatable_executable = 1;
13952 }
13953 return ret;
13954 }
13955
13956 static const struct bfd_elf_special_section
13957 elf32_arm_symbian_special_sections[] =
13958 {
13959 /* In a BPABI executable, the dynamic linking sections do not go in
13960 the loadable read-only segment. The post-linker may wish to
13961 refer to these sections, but they are not part of the final
13962 program image. */
13963 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13964 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13965 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13966 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13967 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13968 /* These sections do not need to be writable as the SymbianOS
13969 postlinker will arrange things so that no dynamic relocation is
13970 required. */
13971 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13972 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13973 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13974 { NULL, 0, 0, 0, 0 }
13975 };
13976
13977 static void
13978 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13979 struct bfd_link_info *link_info)
13980 {
13981 /* BPABI objects are never loaded directly by an OS kernel; they are
13982 processed by a postlinker first, into an OS-specific format. If
13983 the D_PAGED bit is set on the file, BFD will align segments on
13984 page boundaries, so that an OS can directly map the file. With
13985 BPABI objects, that just results in wasted space. In addition,
13986 because we clear the D_PAGED bit, map_sections_to_segments will
13987 recognize that the program headers should not be mapped into any
13988 loadable segment. */
13989 abfd->flags &= ~D_PAGED;
13990 elf32_arm_begin_write_processing (abfd, link_info);
13991 }
13992
13993 static bfd_boolean
13994 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13995 struct bfd_link_info *info)
13996 {
13997 struct elf_segment_map *m;
13998 asection *dynsec;
13999
14000 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14001 segment. However, because the .dynamic section is not marked
14002 with SEC_LOAD, the generic ELF code will not create such a
14003 segment. */
14004 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14005 if (dynsec)
14006 {
14007 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14008 if (m->p_type == PT_DYNAMIC)
14009 break;
14010
14011 if (m == NULL)
14012 {
14013 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14014 m->next = elf_tdata (abfd)->segment_map;
14015 elf_tdata (abfd)->segment_map = m;
14016 }
14017 }
14018
14019 /* Also call the generic arm routine. */
14020 return elf32_arm_modify_segment_map (abfd, info);
14021 }
14022
14023 /* Return the address of the Ith PLT stub in section PLT, for relocation REL,
14024 or (bfd_vma) -1 if it should not be included. */
14025
14026 static bfd_vma
14027 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14028 const arelent *rel ATTRIBUTE_UNUSED)
14029 {
14030 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14031 }
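/* With the entry size configured in elf32_arm_symbian_link_hash_table_create
   above (one instruction plus one word, i.e. 8 bytes), this places entry I
   at a fixed stride from the start of .plt; for example entry 3 would be
   reported at plt->vma + 24.  The stride is only as reliable as that table;
   the figure here is purely illustrative.  */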
14032
14033
14034 #undef elf32_bed
14035 #define elf32_bed elf32_arm_symbian_bed
14036
14037 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14038 will process them and then discard them. */
14039 #undef ELF_DYNAMIC_SEC_FLAGS
14040 #define ELF_DYNAMIC_SEC_FLAGS \
14041 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14042
14043 #undef elf_backend_add_symbol_hook
14044 #undef elf_backend_emit_relocs
14045
14046 #undef bfd_elf32_bfd_link_hash_table_create
14047 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14048 #undef elf_backend_special_sections
14049 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14050 #undef elf_backend_begin_write_processing
14051 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14052 #undef elf_backend_final_write_processing
14053 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14054
14055 #undef elf_backend_modify_segment_map
14056 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14057
14058 /* There is no .got section for BPABI objects, and hence no header. */
14059 #undef elf_backend_got_header_size
14060 #define elf_backend_got_header_size 0
14061
14062 /* Similarly, there is no .got.plt section. */
14063 #undef elf_backend_want_got_plt
14064 #define elf_backend_want_got_plt 0
14065
14066 #undef elf_backend_plt_sym_val
14067 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14068
14069 #undef elf_backend_may_use_rel_p
14070 #define elf_backend_may_use_rel_p 1
14071 #undef elf_backend_may_use_rela_p
14072 #define elf_backend_may_use_rela_p 0
14073 #undef elf_backend_default_use_rela_p
14074 #define elf_backend_default_use_rela_p 0
14075 #undef elf_backend_want_plt_sym
14076 #define elf_backend_want_plt_sym 0
14077 #undef ELF_MAXPAGESIZE
14078 #define ELF_MAXPAGESIZE 0x8000
14079
14080 #include "elf32-target.h"