1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
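/* Illustrative usage sketch (the HTAB/IBFD variables here are
   hypothetical, not taken from this file):

     asection *srel = bfd_get_section_by_name (ibfd,
                                               RELOC_SECTION (htab, ".text"));
     bfd_size_type count = srel->size / RELOC_SIZE (htab);

   On a REL target (use_rel set) this names ".rel.text" and counts
   Elf32_External_Rel entries; on a RELA target it names ".rela.text"
   and counts Elf32_External_Rela entries.  */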
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info,
66 asection *sec,
67 bfd_byte *contents);
68
 69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
 70 R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
 71 HOWTO in that slot. */
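/* For instance, elf32_arm_howto_table_1[R_ARM_ABS32] must be the
   R_ARM_ABS32 HOWTO, which is why gaps in the numbering are filled
   with EMPTY_HOWTO place-holders (e.g. 90-93 below) so that every
   later entry keeps the index of its relocation number.  */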
72
73 static reloc_howto_type elf32_arm_howto_table_1[] =
74 {
75 /* No relocation. */
76 HOWTO (R_ARM_NONE, /* type */
77 0, /* rightshift */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
79 0, /* bitsize */
80 FALSE, /* pc_relative */
81 0, /* bitpos */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
86 0, /* src_mask */
87 0, /* dst_mask */
88 FALSE), /* pcrel_offset */
89
90 HOWTO (R_ARM_PC24, /* type */
91 2, /* rightshift */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
93 24, /* bitsize */
94 TRUE, /* pc_relative */
95 0, /* bitpos */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
103
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
106 0, /* rightshift */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
108 32, /* bitsize */
109 FALSE, /* pc_relative */
110 0, /* bitpos */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
118
 119 /* standard 32 bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
121 0, /* rightshift */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
123 32, /* bitsize */
124 TRUE, /* pc_relative */
125 0, /* bitpos */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
133
 134 /* PC-relative LDR offset - R_ARM_LDR_PC_G0 in AAELF. */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
136 0, /* rightshift */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
138 32, /* bitsize */
139 TRUE, /* pc_relative */
140 0, /* bitpos */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
148
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
151 0, /* rightshift */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
153 16, /* bitsize */
154 FALSE, /* pc_relative */
155 0, /* bitpos */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
163
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
166 0, /* rightshift */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
168 12, /* bitsize */
169 FALSE, /* pc_relative */
170 0, /* bitpos */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
178
179 HOWTO (R_ARM_THM_ABS5, /* type */
180 6, /* rightshift */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
182 5, /* bitsize */
183 FALSE, /* pc_relative */
184 0, /* bitpos */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
192
193 /* 8 bit absolute */
194 HOWTO (R_ARM_ABS8, /* type */
195 0, /* rightshift */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
197 8, /* bitsize */
198 FALSE, /* pc_relative */
199 0, /* bitpos */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
207
208 HOWTO (R_ARM_SBREL32, /* type */
209 0, /* rightshift */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
211 32, /* bitsize */
212 FALSE, /* pc_relative */
213 0, /* bitpos */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
221
222 HOWTO (R_ARM_THM_CALL, /* type */
223 1, /* rightshift */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
225 25, /* bitsize */
226 TRUE, /* pc_relative */
227 0, /* bitpos */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
235
236 HOWTO (R_ARM_THM_PC8, /* type */
237 1, /* rightshift */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
239 8, /* bitsize */
240 TRUE, /* pc_relative */
241 0, /* bitpos */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
249
250 HOWTO (R_ARM_BREL_ADJ, /* type */
251 1, /* rightshift */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
253 32, /* bitsize */
254 FALSE, /* pc_relative */
255 0, /* bitpos */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
263
264 HOWTO (R_ARM_SWI24, /* type */
265 0, /* rightshift */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
267 0, /* bitsize */
268 FALSE, /* pc_relative */
269 0, /* bitpos */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
277
278 HOWTO (R_ARM_THM_SWI8, /* type */
279 0, /* rightshift */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
281 0, /* bitsize */
282 FALSE, /* pc_relative */
283 0, /* bitpos */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
291
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
294 2, /* rightshift */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
296 25, /* bitsize */
297 TRUE, /* pc_relative */
298 0, /* bitpos */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
306
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
309 2, /* rightshift */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
311 22, /* bitsize */
312 TRUE, /* pc_relative */
313 0, /* bitpos */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
321
322 /* Dynamic TLS relocations. */
323
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
325 0, /* rightshift */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
327 32, /* bitsize */
328 FALSE, /* pc_relative */
329 0, /* bitpos */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
337
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
339 0, /* rightshift */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
341 32, /* bitsize */
342 FALSE, /* pc_relative */
343 0, /* bitpos */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
351
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
353 0, /* rightshift */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
355 32, /* bitsize */
356 FALSE, /* pc_relative */
357 0, /* bitpos */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
365
366 /* Relocs used in ARM Linux */
367
368 HOWTO (R_ARM_COPY, /* type */
369 0, /* rightshift */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
371 32, /* bitsize */
372 FALSE, /* pc_relative */
373 0, /* bitpos */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
381
382 HOWTO (R_ARM_GLOB_DAT, /* type */
383 0, /* rightshift */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
385 32, /* bitsize */
386 FALSE, /* pc_relative */
387 0, /* bitpos */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
395
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
397 0, /* rightshift */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
399 32, /* bitsize */
400 FALSE, /* pc_relative */
401 0, /* bitpos */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
409
410 HOWTO (R_ARM_RELATIVE, /* type */
411 0, /* rightshift */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
413 32, /* bitsize */
414 FALSE, /* pc_relative */
415 0, /* bitpos */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
423
424 HOWTO (R_ARM_GOTOFF32, /* type */
425 0, /* rightshift */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
427 32, /* bitsize */
428 FALSE, /* pc_relative */
429 0, /* bitpos */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
437
438 HOWTO (R_ARM_GOTPC, /* type */
439 0, /* rightshift */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
441 32, /* bitsize */
442 TRUE, /* pc_relative */
443 0, /* bitpos */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
451
452 HOWTO (R_ARM_GOT32, /* type */
453 0, /* rightshift */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
455 32, /* bitsize */
456 FALSE, /* pc_relative */
457 0, /* bitpos */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
465
466 HOWTO (R_ARM_PLT32, /* type */
467 2, /* rightshift */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
469 24, /* bitsize */
470 TRUE, /* pc_relative */
471 0, /* bitpos */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
479
480 HOWTO (R_ARM_CALL, /* type */
481 2, /* rightshift */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
483 24, /* bitsize */
484 TRUE, /* pc_relative */
485 0, /* bitpos */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
493
494 HOWTO (R_ARM_JUMP24, /* type */
495 2, /* rightshift */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
497 24, /* bitsize */
498 TRUE, /* pc_relative */
499 0, /* bitpos */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
507
508 HOWTO (R_ARM_THM_JUMP24, /* type */
509 1, /* rightshift */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
511 24, /* bitsize */
512 TRUE, /* pc_relative */
513 0, /* bitpos */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
521
522 HOWTO (R_ARM_BASE_ABS, /* type */
523 0, /* rightshift */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
525 32, /* bitsize */
526 FALSE, /* pc_relative */
527 0, /* bitpos */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
535
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
537 0, /* rightshift */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
539 12, /* bitsize */
540 TRUE, /* pc_relative */
541 0, /* bitpos */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
549
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
551 0, /* rightshift */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
553 12, /* bitsize */
554 TRUE, /* pc_relative */
555 8, /* bitpos */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
563
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
565 0, /* rightshift */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
567 12, /* bitsize */
568 TRUE, /* pc_relative */
569 16, /* bitpos */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
577
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
579 0, /* rightshift */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
581 12, /* bitsize */
582 FALSE, /* pc_relative */
583 0, /* bitpos */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
591
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
593 0, /* rightshift */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
595 8, /* bitsize */
596 FALSE, /* pc_relative */
597 12, /* bitpos */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
605
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
607 0, /* rightshift */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
609 8, /* bitsize */
610 FALSE, /* pc_relative */
611 20, /* bitpos */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
619
620 HOWTO (R_ARM_TARGET1, /* type */
621 0, /* rightshift */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
623 32, /* bitsize */
624 FALSE, /* pc_relative */
625 0, /* bitpos */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
633
634 HOWTO (R_ARM_ROSEGREL32, /* type */
635 0, /* rightshift */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
637 32, /* bitsize */
638 FALSE, /* pc_relative */
639 0, /* bitpos */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
647
648 HOWTO (R_ARM_V4BX, /* type */
649 0, /* rightshift */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
651 32, /* bitsize */
652 FALSE, /* pc_relative */
653 0, /* bitpos */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
661
662 HOWTO (R_ARM_TARGET2, /* type */
663 0, /* rightshift */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
665 32, /* bitsize */
666 FALSE, /* pc_relative */
667 0, /* bitpos */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
675
676 HOWTO (R_ARM_PREL31, /* type */
677 0, /* rightshift */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
679 31, /* bitsize */
680 TRUE, /* pc_relative */
681 0, /* bitpos */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
689
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
691 0, /* rightshift */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
693 16, /* bitsize */
694 FALSE, /* pc_relative */
695 0, /* bitpos */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
703
704 HOWTO (R_ARM_MOVT_ABS, /* type */
705 0, /* rightshift */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
707 16, /* bitsize */
708 FALSE, /* pc_relative */
709 0, /* bitpos */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
717
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
719 0, /* rightshift */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
721 16, /* bitsize */
722 TRUE, /* pc_relative */
723 0, /* bitpos */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
731
732 HOWTO (R_ARM_MOVT_PREL, /* type */
733 0, /* rightshift */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
735 16, /* bitsize */
736 TRUE, /* pc_relative */
737 0, /* bitpos */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
745
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
747 0, /* rightshift */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
749 16, /* bitsize */
750 FALSE, /* pc_relative */
751 0, /* bitpos */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
759
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
761 0, /* rightshift */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
763 16, /* bitsize */
764 FALSE, /* pc_relative */
765 0, /* bitpos */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
773
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
775 0, /* rightshift */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
777 16, /* bitsize */
778 TRUE, /* pc_relative */
779 0, /* bitpos */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
787
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
789 0, /* rightshift */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
791 16, /* bitsize */
792 TRUE, /* pc_relative */
793 0, /* bitpos */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
801
802 HOWTO (R_ARM_THM_JUMP19, /* type */
803 1, /* rightshift */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
805 19, /* bitsize */
806 TRUE, /* pc_relative */
807 0, /* bitpos */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
815
816 HOWTO (R_ARM_THM_JUMP6, /* type */
817 1, /* rightshift */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
819 6, /* bitsize */
820 TRUE, /* pc_relative */
821 0, /* bitpos */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
829
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
832 versa. */
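/* For example, an offset of -8 is still representable: the ADDW is
   rewritten as SUBW and the magnitude 8 is placed in the 12-bit
   immediate field.  */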
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
834 0, /* rightshift */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
836 13, /* bitsize */
837 TRUE, /* pc_relative */
838 0, /* bitpos */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
846
847 HOWTO (R_ARM_THM_PC12, /* type */
848 0, /* rightshift */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
850 13, /* bitsize */
851 TRUE, /* pc_relative */
852 0, /* bitpos */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
860
861 HOWTO (R_ARM_ABS32_NOI, /* type */
862 0, /* rightshift */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
864 32, /* bitsize */
865 FALSE, /* pc_relative */
866 0, /* bitpos */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
874
875 HOWTO (R_ARM_REL32_NOI, /* type */
876 0, /* rightshift */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
878 32, /* bitsize */
879 TRUE, /* pc_relative */
880 0, /* bitpos */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
888
889 /* Group relocations. */
890
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
892 0, /* rightshift */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
894 32, /* bitsize */
895 TRUE, /* pc_relative */
896 0, /* bitpos */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
904
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
906 0, /* rightshift */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
908 32, /* bitsize */
909 TRUE, /* pc_relative */
910 0, /* bitpos */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
918
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
920 0, /* rightshift */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
922 32, /* bitsize */
923 TRUE, /* pc_relative */
924 0, /* bitpos */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
932
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
934 0, /* rightshift */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
936 32, /* bitsize */
937 TRUE, /* pc_relative */
938 0, /* bitpos */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
946
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
948 0, /* rightshift */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
950 32, /* bitsize */
951 TRUE, /* pc_relative */
952 0, /* bitpos */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
960
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
962 0, /* rightshift */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
964 32, /* bitsize */
965 TRUE, /* pc_relative */
966 0, /* bitpos */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
974
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
976 0, /* rightshift */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
978 32, /* bitsize */
979 TRUE, /* pc_relative */
980 0, /* bitpos */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
988
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
990 0, /* rightshift */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
992 32, /* bitsize */
993 TRUE, /* pc_relative */
994 0, /* bitpos */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1002
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1004 0, /* rightshift */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 32, /* bitsize */
1007 TRUE, /* pc_relative */
1008 0, /* bitpos */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1016
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1018 0, /* rightshift */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 32, /* bitsize */
1021 TRUE, /* pc_relative */
1022 0, /* bitpos */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1030
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1032 0, /* rightshift */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1034 32, /* bitsize */
1035 TRUE, /* pc_relative */
1036 0, /* bitpos */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1044
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1046 0, /* rightshift */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1048 32, /* bitsize */
1049 TRUE, /* pc_relative */
1050 0, /* bitpos */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1058
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1060 0, /* rightshift */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1062 32, /* bitsize */
1063 TRUE, /* pc_relative */
1064 0, /* bitpos */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1072
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1074 0, /* rightshift */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1076 32, /* bitsize */
1077 TRUE, /* pc_relative */
1078 0, /* bitpos */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1086
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1088 0, /* rightshift */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1090 32, /* bitsize */
1091 TRUE, /* pc_relative */
1092 0, /* bitpos */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1100
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1102 0, /* rightshift */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1104 32, /* bitsize */
1105 TRUE, /* pc_relative */
1106 0, /* bitpos */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1114
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1116 0, /* rightshift */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1118 32, /* bitsize */
1119 TRUE, /* pc_relative */
1120 0, /* bitpos */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1128
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1130 0, /* rightshift */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1132 32, /* bitsize */
1133 TRUE, /* pc_relative */
1134 0, /* bitpos */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1142
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1144 0, /* rightshift */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1146 32, /* bitsize */
1147 TRUE, /* pc_relative */
1148 0, /* bitpos */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1156
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1158 0, /* rightshift */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1160 32, /* bitsize */
1161 TRUE, /* pc_relative */
1162 0, /* bitpos */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1170
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1172 0, /* rightshift */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1174 32, /* bitsize */
1175 TRUE, /* pc_relative */
1176 0, /* bitpos */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1184
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1186 0, /* rightshift */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 32, /* bitsize */
1189 TRUE, /* pc_relative */
1190 0, /* bitpos */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1198
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1200 0, /* rightshift */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1202 32, /* bitsize */
1203 TRUE, /* pc_relative */
1204 0, /* bitpos */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1212
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1214 0, /* rightshift */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1216 32, /* bitsize */
1217 TRUE, /* pc_relative */
1218 0, /* bitpos */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1226
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1228 0, /* rightshift */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1230 32, /* bitsize */
1231 TRUE, /* pc_relative */
1232 0, /* bitpos */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1240
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1242 0, /* rightshift */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1244 32, /* bitsize */
1245 TRUE, /* pc_relative */
1246 0, /* bitpos */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1254
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1256 0, /* rightshift */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1258 32, /* bitsize */
1259 TRUE, /* pc_relative */
1260 0, /* bitpos */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1268
1269 /* End of group relocations. */
1270
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1272 0, /* rightshift */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1274 16, /* bitsize */
1275 FALSE, /* pc_relative */
1276 0, /* bitpos */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1284
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1286 0, /* rightshift */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1288 16, /* bitsize */
1289 FALSE, /* pc_relative */
1290 0, /* bitpos */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1298
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1300 0, /* rightshift */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1302 16, /* bitsize */
1303 FALSE, /* pc_relative */
1304 0, /* bitpos */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1312
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1314 0, /* rightshift */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1316 16, /* bitsize */
1317 FALSE, /* pc_relative */
1318 0, /* bitpos */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1326
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1328 0, /* rightshift */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1330 16, /* bitsize */
1331 FALSE, /* pc_relative */
1332 0, /* bitpos */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1340
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1342 0, /* rightshift */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1344 16, /* bitsize */
1345 FALSE, /* pc_relative */
1346 0, /* bitpos */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1354
1355 EMPTY_HOWTO (90), /* Unallocated. */
1356 EMPTY_HOWTO (91),
1357 EMPTY_HOWTO (92),
1358 EMPTY_HOWTO (93),
1359
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 32, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 32, /* bitsize */
1392 TRUE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 12, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 12, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1431
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1434 0, /* rightshift */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1436 0, /* bitsize */
1437 FALSE, /* pc_relative */
1438 0, /* bitpos */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1443 0, /* src_mask */
1444 0, /* dst_mask */
1445 FALSE), /* pcrel_offset */
1446
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1449 0, /* rightshift */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1451 0, /* bitsize */
1452 FALSE, /* pc_relative */
1453 0, /* bitpos */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1458 0, /* src_mask */
1459 0, /* dst_mask */
1460 FALSE), /* pcrel_offset */
1461
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1463 1, /* rightshift */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1465 11, /* bitsize */
1466 TRUE, /* pc_relative */
1467 0, /* bitpos */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1475
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1477 1, /* rightshift */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1479 8, /* bitsize */
1480 TRUE, /* pc_relative */
1481 0, /* bitpos */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1489
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1492 0, /* rightshift */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1494 32, /* bitsize */
1495 FALSE, /* pc_relative */
1496 0, /* bitpos */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1504
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1506 0, /* rightshift */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1508 32, /* bitsize */
1509 FALSE, /* pc_relative */
1510 0, /* bitpos */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1518
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1520 0, /* rightshift */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1522 32, /* bitsize */
1523 FALSE, /* pc_relative */
1524 0, /* bitpos */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1532
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1534 0, /* rightshift */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1536 32, /* bitsize */
1537 FALSE, /* pc_relative */
1538 0, /* bitpos */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1546
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 12, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 12, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 12, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602 };
1603
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1607
1608 249-255 extended, currently unused, relocations: */
1609
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1611 {
1612 HOWTO (R_ARM_RREL32, /* type */
1613 0, /* rightshift */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1615 0, /* bitsize */
1616 FALSE, /* pc_relative */
1617 0, /* bitpos */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1622 0, /* src_mask */
1623 0, /* dst_mask */
1624 FALSE), /* pcrel_offset */
1625
1626 HOWTO (R_ARM_RABS32, /* type */
1627 0, /* rightshift */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1629 0, /* bitsize */
1630 FALSE, /* pc_relative */
1631 0, /* bitpos */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1636 0, /* src_mask */
1637 0, /* dst_mask */
1638 FALSE), /* pcrel_offset */
1639
1640 HOWTO (R_ARM_RPC24, /* type */
1641 0, /* rightshift */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1643 0, /* bitsize */
1644 FALSE, /* pc_relative */
1645 0, /* bitpos */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1650 0, /* src_mask */
1651 0, /* dst_mask */
1652 FALSE), /* pcrel_offset */
1653
1654 HOWTO (R_ARM_RBASE, /* type */
1655 0, /* rightshift */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1657 0, /* bitsize */
1658 FALSE, /* pc_relative */
1659 0, /* bitpos */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1664 0, /* src_mask */
1665 0, /* dst_mask */
1666 FALSE) /* pcrel_offset */
1667 };
1668
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1671 {
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1674
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1678
1679 return NULL;
1680 }
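/* Illustrative example: elf32_arm_howto_from_type (R_ARM_ABS32) returns
   &elf32_arm_howto_table_1[R_ARM_ABS32], while a type outside both
   tables (for instance one of the private 112-127 values) returns
   NULL.  */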
1681
1682 static void
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1685 {
1686 unsigned int r_type;
1687
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1690 }
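/* This is the function that the elf_info_to_howto_rel definition above
   resolves to.  For an unrecognised relocation type it leaves
   bfd_reloc->howto set to NULL.  */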
1691
1692 struct elf32_arm_reloc_map
1693 {
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1696 };
1697
 1698 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1700 {
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1725 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1726 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1727 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1728 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1729 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1730 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1731 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1732 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1733 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1734 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1735 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1736 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1737 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1738 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1739 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1740 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1741 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1742 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1743 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1744 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1745 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1746 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1747 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1748 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1750 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1751 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1752 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1754 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1755 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1756 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1757 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1758 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1759 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1760 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1761 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1762 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1763 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1764 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1765 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1766 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1768 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1769 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1770 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1771 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1772 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1773 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1774 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1775 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1776 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1777 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1778 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1779 };
1780
1781 static reloc_howto_type *
1782 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1783 bfd_reloc_code_real_type code)
1784 {
1785 unsigned int i;
1786
1787 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1788 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1789 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1790
1791 return NULL;
1792 }
1793
1794 static reloc_howto_type *
1795 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1796 const char *r_name)
1797 {
1798 unsigned int i;
1799
1800 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1801 if (elf32_arm_howto_table_1[i].name != NULL
1802 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1803 return &elf32_arm_howto_table_1[i];
1804
1805 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1806 if (elf32_arm_howto_table_2[i].name != NULL
1807 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1808 return &elf32_arm_howto_table_2[i];
1809
1810 return NULL;
1811 }
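/* Illustrative sketch, not part of the original source: how a client
   might exercise the two lookup routines above.  The bfd argument is
   unused by both, so any open bfd will do.  */
#if 0
static void
example_reloc_lookups (bfd *abfd)
{
  /* BFD_RELOC_32 is listed in elf32_arm_reloc_map as R_ARM_ABS32, so
     this returns &elf32_arm_howto_table_1[R_ARM_ABS32].  */
  reloc_howto_type *by_code = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);

  /* Name lookup is case-insensitive and searches both howto tables.  */
  reloc_howto_type *by_name = elf32_arm_reloc_name_lookup (abfd, "r_arm_abs32");

  BFD_ASSERT (by_code == by_name);
}
#endif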
1812
1813 /* Support for core dump NOTE sections. */
1814
1815 static bfd_boolean
1816 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1817 {
1818 int offset;
1819 size_t size;
1820
1821 switch (note->descsz)
1822 {
1823 default:
1824 return FALSE;
1825
1826 case 148: /* Linux/ARM 32-bit. */
1827 /* pr_cursig */
1828 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1829
1830 /* pr_pid */
1831 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1832
1833 /* pr_reg */
1834 offset = 72;
1835 size = 72;
1836
1837 break;
1838 }
1839
1840 /* Make a ".reg/999" section. */
1841 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1842 size, note->descpos + offset);
1843 }
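/* Illustrative note, not part of the original source, assuming the
   usual Linux/ARM core file layout: within the 148-byte NT_PRSTATUS
   note above, pr_reg lives at offset 72 and its 72-byte size covers
   18 32-bit values (r0-r15, cpsr and ORIG_r0, i.e. 18 * 4 == 72).  */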
1844
1845 static bfd_boolean
1846 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1847 {
1848 switch (note->descsz)
1849 {
1850 default:
1851 return FALSE;
1852
1853 case 124: /* Linux/ARM elf_prpsinfo. */
1854 elf_tdata (abfd)->core_program
1855 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1856 elf_tdata (abfd)->core_command
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1858 }
1859
1860 /* Note that for some reason a spurious space is tacked
1861 onto the end of the args by some implementations (at least
1862 one, anyway), so strip it off if it is present. */
1863 {
1864 char *command = elf_tdata (abfd)->core_command;
1865 int n = strlen (command);
1866
1867 if (0 < n && command[n - 1] == ' ')
1868 command[n - 1] = '\0';
1869 }
1870
1871 return TRUE;
1872 }
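/* Illustrative note, not part of the original source, assuming the
   Linux elf_prpsinfo layout: in the 124-byte note above, offset 28 with
   length 16 corresponds to pr_fname and offset 44 with length 80 to
   pr_psargs, which is why they become core_program and core_command.  */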
1873
1874 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1875 #define TARGET_LITTLE_NAME "elf32-littlearm"
1876 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1877 #define TARGET_BIG_NAME "elf32-bigarm"
1878
1879 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1880 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1881
1882 typedef unsigned long int insn32;
1883 typedef unsigned short int insn16;
1884
1885 /* In lieu of proper flags, assume all EABIv4 or later objects are
1886 interworkable. */
1887 #define INTERWORK_FLAG(abfd) \
1888 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1889 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1890 || ((abfd)->flags & BFD_LINKER_CREATED))
1891
1892 /* The linker script knows the section names for placement.
1893 The entry_names are used to do simple name mangling on the stubs:
1894 given a function name and its type, the corresponding stub can be found.
1895 The names can be changed; the only requirement is that the %s be present. */
1896 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1897 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1898
1899 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1900 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1901
1902 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1903 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1904
1905 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1906 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1907
1908 #define STUB_ENTRY_NAME "__%s_veneer"
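/* Illustrative sketch, not part of the original source: the entry name
   macros above are plain printf-style templates, so glue and veneer
   symbols for a function "foo" would be formed as below (the buffer
   size is chosen only for the example).  */
#if 0
static void
example_glue_names (void)
{
  char name[64];

  /* Thumb caller reaching ARM "foo": "__foo_from_thumb".  */
  sprintf (name, THUMB2ARM_GLUE_ENTRY_NAME, "foo");

  /* ARM caller reaching Thumb "foo": "__foo_from_arm".  */
  sprintf (name, ARM2THUMB_GLUE_ENTRY_NAME, "foo");

  /* Generic long-branch veneer for "foo": "__foo_veneer".  */
  sprintf (name, STUB_ENTRY_NAME, "foo");
}
#endif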
1909
1910 /* The name of the dynamic interpreter. This is put in the .interp
1911 section. */
1912 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1913
1914 #ifdef FOUR_WORD_PLT
1915
1916 /* The first entry in a procedure linkage table looks like
1917 this. It is set up so that any shared library function that is
1918 called before the relocation has been set up calls the dynamic
1919 linker first. */
1920 static const bfd_vma elf32_arm_plt0_entry [] =
1921 {
1922 0xe52de004, /* str lr, [sp, #-4]! */
1923 0xe59fe010, /* ldr lr, [pc, #16] */
1924 0xe08fe00e, /* add lr, pc, lr */
1925 0xe5bef008, /* ldr pc, [lr, #8]! */
1926 };
1927
1928 /* Subsequent entries in a procedure linkage table look like
1929 this. */
1930 static const bfd_vma elf32_arm_plt_entry [] =
1931 {
1932 0xe28fc600, /* add ip, pc, #NN */
1933 0xe28cca00, /* add ip, ip, #NN */
1934 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1935 0x00000000, /* unused */
1936 };
1937
1938 #else
1939
1940 /* The first entry in a procedure linkage table looks like
1941 this. It is set up so that any shared library function that is
1942 called before the relocation has been set up calls the dynamic
1943 linker first. */
1944 static const bfd_vma elf32_arm_plt0_entry [] =
1945 {
1946 0xe52de004, /* str lr, [sp, #-4]! */
1947 0xe59fe004, /* ldr lr, [pc, #4] */
1948 0xe08fe00e, /* add lr, pc, lr */
1949 0xe5bef008, /* ldr pc, [lr, #8]! */
1950 0x00000000, /* &GOT[0] - . */
1951 };
1952
1953 /* Subsequent entries in a procedure linkage table look like
1954 this. */
1955 static const bfd_vma elf32_arm_plt_entry [] =
1956 {
1957 0xe28fc600, /* add ip, pc, #0xNN00000 */
1958 0xe28cca00, /* add ip, ip, #0xNN000 */
1959 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1960 };
1961
1962 #endif
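/* Illustrative note, not part of the original source, assuming the way
   these templates are patched when the PLT is written out: the
   three-word entry reaches its GOT slot by splitting the pc-relative
   displacement D (taken from the first instruction's pc, i.e. entry
   address + 8) into three fields:

     add ip, pc, #(D & 0x0ff00000)     @ bits 27-20
     add ip, ip, #(D & 0x000ff000)     @ bits 19-12
     ldr pc, [ip, #(D & 0x00000fff)]!  @ bits 11-0

   so D = 0x00123456, for example, is applied as
   0x00100000 + 0x00023000 + 0x456.  */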
1963
1964 /* The format of the first entry in the procedure linkage table
1965 for a VxWorks executable. */
1966 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1967 {
1968 0xe52dc008, /* str ip,[sp,#-8]! */
1969 0xe59fc000, /* ldr ip,[pc] */
1970 0xe59cf008, /* ldr pc,[ip,#8] */
1971 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1972 };
1973
1974 /* The format of subsequent entries in a VxWorks executable. */
1975 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1976 {
1977 0xe59fc000, /* ldr ip,[pc] */
1978 0xe59cf000, /* ldr pc,[ip] */
1979 0x00000000, /* .long @got */
1980 0xe59fc000, /* ldr ip,[pc] */
1981 0xea000000, /* b _PLT */
1982 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1983 };
1984
1985 /* The format of entries in a VxWorks shared library. */
1986 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1987 {
1988 0xe59fc000, /* ldr ip,[pc] */
1989 0xe79cf009, /* ldr pc,[ip,r9] */
1990 0x00000000, /* .long @got */
1991 0xe59fc000, /* ldr ip,[pc] */
1992 0xe599f008, /* ldr pc,[r9,#8] */
1993 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1994 };
1995
1996 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1997 #define PLT_THUMB_STUB_SIZE 4
1998 static const bfd_vma elf32_arm_plt_thumb_stub [] =
1999 {
2000 0x4778, /* bx pc */
2001 0x46c0 /* nop */
2002 };
2003
2004 /* The entries in a PLT when using a DLL-based target with multiple
2005 address spaces. */
2006 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2007 {
2008 0xe51ff004, /* ldr pc, [pc, #-4] */
2009 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2010 };
2011
2012 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2013 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2014 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2015 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2016 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2017 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
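/* Illustrative note, not part of the original source: these limits
   follow directly from the branch encodings.  An ARM B/BL has a signed
   24-bit immediate scaled by 4 and is relative to pc (instruction
   address + 8), giving ((1 << 23) - 1) * 4 + 8 = 0x2000004 bytes of
   forward reach and -(1 << 23) * 4 + 8 = -0x1fffff8 bytes backwards.
   The Thumb-1 BL pair provides a signed 23-bit byte offset relative to
   instruction address + 4, and Thumb-2 B.W/BL widen that to a signed
   25-bit byte offset, hence the THM and THM2 variants above.  */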
2018
2019 enum stub_insn_type
2020 {
2021 THUMB16_TYPE = 1,
2022 THUMB32_TYPE,
2023 ARM_TYPE,
2024 DATA_TYPE
2025 };
2026
2027 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2028 /* A bit of a hack: a Thumb conditional branch into which the proper
2029 condition is inserted by arm_build_one_stub(). */
2030 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2031 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2032 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2033 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2034 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2035 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2036
2037 typedef struct
2038 {
2039 bfd_vma data;
2040 enum stub_insn_type type;
2041 unsigned int r_type;
2042 int reloc_addend;
2043 } insn_sequence;
2044
2045 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2046 to reach the stub if necessary. */
2047 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2048 {
2049 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2050 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2051 };
2052
2053 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2054 available. */
2055 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2056 {
2057 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2058 ARM_INSN(0xe12fff1c), /* bx ip */
2059 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2060 };
2061
2062 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2063 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2064 {
2065 THUMB16_INSN(0xb401), /* push {r0} */
2066 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2067 THUMB16_INSN(0x4684), /* mov ip, r0 */
2068 THUMB16_INSN(0xbc01), /* pop {r0} */
2069 THUMB16_INSN(0x4760), /* bx ip */
2070 THUMB16_INSN(0xbf00), /* nop */
2071 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2072 };
2073
2074 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2075 allowed. */
2076 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2077 {
2078 THUMB16_INSN(0x4778), /* bx pc */
2079 THUMB16_INSN(0x46c0), /* nop */
2080 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2081 ARM_INSN(0xe12fff1c), /* bx ip */
2082 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2083 };
2084
2085 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2086 available. */
2087 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2088 {
2089 THUMB16_INSN(0x4778), /* bx pc */
2090 THUMB16_INSN(0x46c0), /* nop */
2091 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2092 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2093 };
2094
2095 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2096 one, when the destination is close enough. */
2097 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2098 {
2099 THUMB16_INSN(0x4778), /* bx pc */
2100 THUMB16_INSN(0x46c0), /* nop */
2101 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2102 };
2103
2104 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2105 blx to reach the stub if necessary. */
2106 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2107 {
2108 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2109 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2110 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2111 };
2112
2113 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2114 blx to reach the stub if necessary. We cannot add directly into pc;
2115 that is not guaranteed to mode switch (the behaviour differs between
2116 ARMv6 and ARMv7). */
2117 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2118 {
2119 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2120 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2121 ARM_INSN(0xe12fff1c), /* bx ip */
2122 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2123 };
2124
2125 /* V4T ARM -> Thumb long branch stub, PIC. */
2126 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2127 {
2128 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2129 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2130 ARM_INSN(0xe12fff1c), /* bx ip */
2131 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2132 };
2133
2134 /* V4T Thumb -> ARM long branch stub, PIC. */
2135 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2136 {
2137 THUMB16_INSN(0x4778), /* bx pc */
2138 THUMB16_INSN(0x46c0), /* nop */
2139 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2140 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2141 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2142 };
2143
2144 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2145 architectures. */
2146 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2147 {
2148 THUMB16_INSN(0xb401), /* push {r0} */
2149 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2150 THUMB16_INSN(0x46fc), /* mov ip, pc */
2151 THUMB16_INSN(0x4484), /* add ip, r0 */
2152 THUMB16_INSN(0xbc01), /* pop {r0} */
2153 THUMB16_INSN(0x4760), /* bx ip */
2154 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2155 };
2156
2157 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2158 allowed. */
2159 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2160 {
2161 THUMB16_INSN(0x4778), /* bx pc */
2162 THUMB16_INSN(0x46c0), /* nop */
2163 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2164 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2165 ARM_INSN(0xe12fff1c), /* bx ip */
2166 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2167 };
2168
2169 /* Cortex-A8 erratum-workaround stubs. */
2170
2171 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2172 can't use a conditional branch to reach this stub). */
2173
2174 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2175 {
2176 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2177 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2178 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2179 };
2180
2181 /* Stub used for b.w and bl.w instructions. */
2182
2183 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2184 {
2185 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2186 };
2187
2188 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2189 {
2190 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2191 };
2192
2193 /* Stub used for Thumb-2 blx.w instructions. The original blx.w instruction
2194 (which switches to ARM mode) is modified to point to this stub; the stub
2195 then jumps to the real destination using an ARM-mode branch. */
2196
2197 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2198 {
2199 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2200 };
2201
2202 /* Section name for stubs is the associated section name plus this
2203 string. */
2204 #define STUB_SUFFIX ".stub"
2205
2206 /* One entry per long/short branch stub defined above. */
2207 #define DEF_STUBS \
2208 DEF_STUB(long_branch_any_any) \
2209 DEF_STUB(long_branch_v4t_arm_thumb) \
2210 DEF_STUB(long_branch_thumb_only) \
2211 DEF_STUB(long_branch_v4t_thumb_thumb) \
2212 DEF_STUB(long_branch_v4t_thumb_arm) \
2213 DEF_STUB(short_branch_v4t_thumb_arm) \
2214 DEF_STUB(long_branch_any_arm_pic) \
2215 DEF_STUB(long_branch_any_thumb_pic) \
2216 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2219 DEF_STUB(long_branch_thumb_only_pic) \
2220 DEF_STUB(a8_veneer_b_cond) \
2221 DEF_STUB(a8_veneer_b) \
2222 DEF_STUB(a8_veneer_bl) \
2223 DEF_STUB(a8_veneer_blx)
2224
2225 #define DEF_STUB(x) arm_stub_##x,
2226 enum elf32_arm_stub_type {
2227 arm_stub_none,
2228 DEF_STUBS
2229 /* Note the first a8_veneer type. */
2230 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template_sequence;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
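/* Illustrative note, not part of the original source: DEF_STUBS is an
   X-macro, expanded once (just above) to produce enumerators and once
   to produce table entries, so the two stay in lock-step.  For example
   DEF_STUB(long_branch_any_any) contributes arm_stub_long_branch_any_any
   to the enum and {elf32_arm_stub_long_branch_any_any, ARRAY_SIZE (...)}
   to stub_definitions; since the table starts with a {NULL, 0} entry for
   arm_stub_none, stub_definitions[stub_type] yields the template for a
   given stub type.  */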
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
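/* Illustrative sketch, not part of the original source: later code in
   this file reaches the per-section ARM data through the accessor
   above, for instance to walk the recorded mapping symbols.  */
#if 0
static void
example_walk_mapping_symbols (asection *sec)
{
  _arm_elf_section_data *secdata = elf32_arm_section_data (sec);
  unsigned int i;

  for (i = 0; i < secdata->mapcount; i++)
    /* map[i].type is 'a', 't' or 'd' for ARM code, Thumb code or data,
       and map[i].vma gives where that state starts.  */
    (void) secdata->map[i].type;
}
#endif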
2387
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing, which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2405 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* Array to keep track of which stub sections have been created, and
2525 information on stub grouping. */
2526 struct map_stub
2527 {
2528 /* This is the section to which stubs in the group will be
2529 attached. */
2530 asection *link_sec;
2531 /* The stub section. */
2532 asection *stub_sec;
2533 };
2534
2535 /* ARM ELF linker hash table. */
2536 struct elf32_arm_link_hash_table
2537 {
2538 /* The main hash table. */
2539 struct elf_link_hash_table root;
2540
2541 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2542 bfd_size_type thumb_glue_size;
2543
2544 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2545 bfd_size_type arm_glue_size;
2546
2547 /* The size in bytes of section containing the ARMv4 BX veneers. */
2548 bfd_size_type bx_glue_size;
2549
2550 /* Offsets of ARMv4 BX veneers. Bit 1 is set if the veneer is present,
2551 and bit 0 is set once the veneer has been populated. */
2552 bfd_vma bx_glue_offset[15];
2553
2554 /* The size in bytes of the section containing glue for VFP11 erratum
2555 veneers. */
2556 bfd_size_type vfp11_erratum_glue_size;
2557
2558 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2559 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2560 elf32_arm_write_section(). */
2561 struct a8_erratum_fix *a8_erratum_fixes;
2562 unsigned int num_a8_erratum_fixes;
2563
2564 /* An arbitrary input BFD chosen to hold the glue sections. */
2565 bfd * bfd_of_glue_owner;
2566
2567 /* Nonzero to output a BE8 image. */
2568 int byteswap_code;
2569
2570 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2571 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2572 int target1_is_rel;
2573
2574 /* The relocation to use for R_ARM_TARGET2 relocations. */
2575 int target2_reloc;
2576
2577 /* 0 = Ignore R_ARM_V4BX.
2578 1 = Convert BX to MOV PC.
2579 2 = Generate v4 interworking stubs. */
2580 int fix_v4bx;
2581
2582 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2583 int fix_cortex_a8;
2584
2585 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2586 int use_blx;
2587
2588 /* What sort of code sequences we should look for which may trigger the
2589 VFP11 denorm erratum. */
2590 bfd_arm_vfp11_fix vfp11_fix;
2591
2592 /* Global counter for the number of fixes we have emitted. */
2593 int num_vfp11_fixes;
2594
2595 /* Nonzero to force PIC branch veneers. */
2596 int pic_veneer;
2597
2598 /* The number of bytes in the initial entry in the PLT. */
2599 bfd_size_type plt_header_size;
2600
2601 /* The number of bytes in the subsequent PLT entries. */
2602 bfd_size_type plt_entry_size;
2603
2604 /* True if the target system is VxWorks. */
2605 int vxworks_p;
2606
2607 /* True if the target system is Symbian OS. */
2608 int symbian_p;
2609
2610 /* True if the target uses REL relocations. */
2611 int use_rel;
2612
2613 /* Short-cuts to get to dynamic linker sections. */
2614 asection *sgot;
2615 asection *sgotplt;
2616 asection *srelgot;
2617 asection *splt;
2618 asection *srelplt;
2619 asection *sdynbss;
2620 asection *srelbss;
2621
2622 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2623 asection *srelplt2;
2624
2625 /* Data for R_ARM_TLS_LDM32 relocations. */
2626 union
2627 {
2628 bfd_signed_vma refcount;
2629 bfd_vma offset;
2630 } tls_ldm_got;
2631
2632 /* Small local sym cache. */
2633 struct sym_cache sym_cache;
2634
2635 /* For convenience in allocate_dynrelocs. */
2636 bfd * obfd;
2637
2638 /* The stub hash table. */
2639 struct bfd_hash_table stub_hash_table;
2640
2641 /* Linker stub bfd. */
2642 bfd *stub_bfd;
2643
2644 /* Linker call-backs. */
2645 asection * (*add_stub_section) (const char *, asection *);
2646 void (*layout_sections_again) (void);
2647
2648 /* Array to keep track of which stub sections have been created, and
2649 information on stub grouping. */
2650 struct map_stub *stub_group;
2651
2652 /* Assorted information used by elf32_arm_size_stubs. */
2653 unsigned int bfd_count;
2654 int top_index;
2655 asection **input_list;
2656 };
2657
2658 /* Create an entry in an ARM ELF linker hash table. */
2659
2660 static struct bfd_hash_entry *
2661 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2662 struct bfd_hash_table * table,
2663 const char * string)
2664 {
2665 struct elf32_arm_link_hash_entry * ret =
2666 (struct elf32_arm_link_hash_entry *) entry;
2667
2668 /* Allocate the structure if it has not already been allocated by a
2669 subclass. */
2670 if (ret == NULL)
2671 ret = (struct elf32_arm_link_hash_entry *)
2672 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2673 if (ret == NULL)
2674 return (struct bfd_hash_entry *) ret;
2675
2676 /* Call the allocation method of the superclass. */
2677 ret = ((struct elf32_arm_link_hash_entry *)
2678 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2679 table, string));
2680 if (ret != NULL)
2681 {
2682 ret->relocs_copied = NULL;
2683 ret->tls_type = GOT_UNKNOWN;
2684 ret->plt_thumb_refcount = 0;
2685 ret->plt_maybe_thumb_refcount = 0;
2686 ret->plt_got_offset = -1;
2687 ret->export_glue = NULL;
2688
2689 ret->stub_cache = NULL;
2690 }
2691
2692 return (struct bfd_hash_entry *) ret;
2693 }
2694
2695 /* Initialize an entry in the stub hash table. */
2696
2697 static struct bfd_hash_entry *
2698 stub_hash_newfunc (struct bfd_hash_entry *entry,
2699 struct bfd_hash_table *table,
2700 const char *string)
2701 {
2702 /* Allocate the structure if it has not already been allocated by a
2703 subclass. */
2704 if (entry == NULL)
2705 {
2706 entry = (struct bfd_hash_entry *)
2707 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2708 if (entry == NULL)
2709 return entry;
2710 }
2711
2712 /* Call the allocation method of the superclass. */
2713 entry = bfd_hash_newfunc (entry, table, string);
2714 if (entry != NULL)
2715 {
2716 struct elf32_arm_stub_hash_entry *eh;
2717
2718 /* Initialize the local fields. */
2719 eh = (struct elf32_arm_stub_hash_entry *) entry;
2720 eh->stub_sec = NULL;
2721 eh->stub_offset = 0;
2722 eh->target_value = 0;
2723 eh->target_section = NULL;
2724 eh->target_addend = 0;
2725 eh->orig_insn = 0;
2726 eh->stub_type = arm_stub_none;
2727 eh->stub_size = 0;
2728 eh->stub_template = NULL;
2729 eh->stub_template_size = 0;
2730 eh->h = NULL;
2731 eh->id_sec = NULL;
2732 eh->output_name = NULL;
2733 }
2734
2735 return entry;
2736 }
2737
2738 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2739 shortcuts to them in our hash table. */
2740
2741 static bfd_boolean
2742 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2743 {
2744 struct elf32_arm_link_hash_table *htab;
2745
2746 htab = elf32_arm_hash_table (info);
2747 /* BPABI objects never have a GOT, or associated sections. */
2748 if (htab->symbian_p)
2749 return TRUE;
2750
2751 if (! _bfd_elf_create_got_section (dynobj, info))
2752 return FALSE;
2753
2754 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2755 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2756 if (!htab->sgot || !htab->sgotplt)
2757 abort ();
2758
2759 htab->srelgot = bfd_get_section_by_name (dynobj,
2760 RELOC_SECTION (htab, ".got"));
2761 if (htab->srelgot == NULL)
2762 return FALSE;
2763 return TRUE;
2764 }
2765
2766 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2767 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2768 hash table. */
2769
2770 static bfd_boolean
2771 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2772 {
2773 struct elf32_arm_link_hash_table *htab;
2774
2775 htab = elf32_arm_hash_table (info);
2776 if (!htab->sgot && !create_got_section (dynobj, info))
2777 return FALSE;
2778
2779 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2780 return FALSE;
2781
2782 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2783 htab->srelplt = bfd_get_section_by_name (dynobj,
2784 RELOC_SECTION (htab, ".plt"));
2785 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2786 if (!info->shared)
2787 htab->srelbss = bfd_get_section_by_name (dynobj,
2788 RELOC_SECTION (htab, ".bss"));
2789
2790 if (htab->vxworks_p)
2791 {
2792 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2793 return FALSE;
2794
2795 if (info->shared)
2796 {
2797 htab->plt_header_size = 0;
2798 htab->plt_entry_size
2799 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2800 }
2801 else
2802 {
2803 htab->plt_header_size
2804 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2805 htab->plt_entry_size
2806 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2807 }
2808 }
2809
2810 if (!htab->splt
2811 || !htab->srelplt
2812 || !htab->sdynbss
2813 || (!info->shared && !htab->srelbss))
2814 abort ();
2815
2816 return TRUE;
2817 }
2818
2819 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2820
2821 static void
2822 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2823 struct elf_link_hash_entry *dir,
2824 struct elf_link_hash_entry *ind)
2825 {
2826 struct elf32_arm_link_hash_entry *edir, *eind;
2827
2828 edir = (struct elf32_arm_link_hash_entry *) dir;
2829 eind = (struct elf32_arm_link_hash_entry *) ind;
2830
2831 if (eind->relocs_copied != NULL)
2832 {
2833 if (edir->relocs_copied != NULL)
2834 {
2835 struct elf32_arm_relocs_copied **pp;
2836 struct elf32_arm_relocs_copied *p;
2837
2838 /* Add reloc counts against the indirect sym to the direct sym
2839 list. Merge any entries against the same section. */
2840 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2841 {
2842 struct elf32_arm_relocs_copied *q;
2843
2844 for (q = edir->relocs_copied; q != NULL; q = q->next)
2845 if (q->section == p->section)
2846 {
2847 q->pc_count += p->pc_count;
2848 q->count += p->count;
2849 *pp = p->next;
2850 break;
2851 }
2852 if (q == NULL)
2853 pp = &p->next;
2854 }
2855 *pp = edir->relocs_copied;
2856 }
2857
2858 edir->relocs_copied = eind->relocs_copied;
2859 eind->relocs_copied = NULL;
2860 }
2861
2862 if (ind->root.type == bfd_link_hash_indirect)
2863 {
2864 /* Copy over PLT info. */
2865 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2866 eind->plt_thumb_refcount = 0;
2867 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2868 eind->plt_maybe_thumb_refcount = 0;
2869
2870 if (dir->got.refcount <= 0)
2871 {
2872 edir->tls_type = eind->tls_type;
2873 eind->tls_type = GOT_UNKNOWN;
2874 }
2875 }
2876
2877 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2878 }
2879
2880 /* Create an ARM elf linker hash table. */
2881
2882 static struct bfd_link_hash_table *
2883 elf32_arm_link_hash_table_create (bfd *abfd)
2884 {
2885 struct elf32_arm_link_hash_table *ret;
2886 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2887
2888 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2889 if (ret == NULL)
2890 return NULL;
2891
2892 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2893 elf32_arm_link_hash_newfunc,
2894 sizeof (struct elf32_arm_link_hash_entry)))
2895 {
2896 free (ret);
2897 return NULL;
2898 }
2899
2900 ret->sgot = NULL;
2901 ret->sgotplt = NULL;
2902 ret->srelgot = NULL;
2903 ret->splt = NULL;
2904 ret->srelplt = NULL;
2905 ret->sdynbss = NULL;
2906 ret->srelbss = NULL;
2907 ret->srelplt2 = NULL;
2908 ret->thumb_glue_size = 0;
2909 ret->arm_glue_size = 0;
2910 ret->bx_glue_size = 0;
2911 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2912 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2913 ret->vfp11_erratum_glue_size = 0;
2914 ret->num_vfp11_fixes = 0;
2915 ret->fix_cortex_a8 = 0;
2916 ret->bfd_of_glue_owner = NULL;
2917 ret->byteswap_code = 0;
2918 ret->target1_is_rel = 0;
2919 ret->target2_reloc = R_ARM_NONE;
2920 #ifdef FOUR_WORD_PLT
2921 ret->plt_header_size = 16;
2922 ret->plt_entry_size = 16;
2923 #else
2924 ret->plt_header_size = 20;
2925 ret->plt_entry_size = 12;
2926 #endif
2927 ret->fix_v4bx = 0;
2928 ret->use_blx = 0;
2929 ret->vxworks_p = 0;
2930 ret->symbian_p = 0;
2931 ret->use_rel = 1;
2932 ret->sym_cache.abfd = NULL;
2933 ret->obfd = abfd;
2934 ret->tls_ldm_got.refcount = 0;
2935 ret->stub_bfd = NULL;
2936 ret->add_stub_section = NULL;
2937 ret->layout_sections_again = NULL;
2938 ret->stub_group = NULL;
2939 ret->bfd_count = 0;
2940 ret->top_index = 0;
2941 ret->input_list = NULL;
2942
2943 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2944 sizeof (struct elf32_arm_stub_hash_entry)))
2945 {
2946 free (ret);
2947 return NULL;
2948 }
2949
2950 return &ret->root.root;
2951 }
2952
2953 /* Free the derived linker hash table. */
2954
2955 static void
2956 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2957 {
2958 struct elf32_arm_link_hash_table *ret
2959 = (struct elf32_arm_link_hash_table *) hash;
2960
2961 bfd_hash_table_free (&ret->stub_hash_table);
2962 _bfd_generic_link_hash_table_free (hash);
2963 }
2964
2965 /* Determine if we're dealing with a Thumb only architecture. */
2966
2967 static bfd_boolean
2968 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2969 {
2970 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2971 Tag_CPU_arch);
2972 int profile;
2973
2974 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2975 return FALSE;
2976
2977 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2978 Tag_CPU_arch_profile);
2979
2980 return profile == 'M';
2981 }
2982
2983 /* Determine if we're dealing with a Thumb-2 object. */
2984
2985 static bfd_boolean
2986 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2987 {
2988 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2989 Tag_CPU_arch);
2990 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2991 }
2992
2993 /* Determine what kind of NOPs are available. */
2994
2995 static bfd_boolean
2996 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
2997 {
2998 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2999 Tag_CPU_arch);
3000 return arch == TAG_CPU_ARCH_V6T2
3001 || arch == TAG_CPU_ARCH_V6K
3002 || arch == TAG_CPU_ARCH_V7
3003 || arch == TAG_CPU_ARCH_V7E_M;
3004 }
3005
3006 static bfd_boolean
3007 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3008 {
3009 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3010 Tag_CPU_arch);
3011 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3012 || arch == TAG_CPU_ARCH_V7E_M);
3013 }
3014
3015 static bfd_boolean
3016 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3017 {
3018 switch (stub_type)
3019 {
3020 case arm_stub_long_branch_thumb_only:
3021 case arm_stub_long_branch_v4t_thumb_arm:
3022 case arm_stub_short_branch_v4t_thumb_arm:
3023 case arm_stub_long_branch_v4t_thumb_arm_pic:
3024 case arm_stub_long_branch_thumb_only_pic:
3025 return TRUE;
3026 case arm_stub_none:
3027 BFD_FAIL ();
3028 return FALSE;
3029 break;
3030 default:
3031 return FALSE;
3032 }
3033 }
3034
3035 /* Determine the type of stub needed, if any, for a call. */
3036
3037 static enum elf32_arm_stub_type
3038 arm_type_of_stub (struct bfd_link_info *info,
3039 asection *input_sec,
3040 const Elf_Internal_Rela *rel,
3041 unsigned char st_type,
3042 struct elf32_arm_link_hash_entry *hash,
3043 bfd_vma destination,
3044 asection *sym_sec,
3045 bfd *input_bfd,
3046 const char *name)
3047 {
3048 bfd_vma location;
3049 bfd_signed_vma branch_offset;
3050 unsigned int r_type;
3051 struct elf32_arm_link_hash_table * globals;
3052 int thumb2;
3053 int thumb_only;
3054 enum elf32_arm_stub_type stub_type = arm_stub_none;
3055 int use_plt = 0;
3056
3057 /* If the destination is of type STT_SECTION we do not know its
3058 actual type, so give up. */
3059 if (st_type == STT_SECTION)
3060 return stub_type;
3061
3062 globals = elf32_arm_hash_table (info);
3063
3064 thumb_only = using_thumb_only (globals);
3065
3066 thumb2 = using_thumb2 (globals);
3067
3068 /* Determine where the call point is. */
3069 location = (input_sec->output_offset
3070 + input_sec->output_section->vma
3071 + rel->r_offset);
3072
3073 branch_offset = (bfd_signed_vma)(destination - location);
3074
3075 r_type = ELF32_R_TYPE (rel->r_info);
3076
3077 /* Compute the PLT test once up front, to keep the conditions below simpler. */
3078 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3079 {
3080 use_plt = 1;
3081 /* Note when dealing with PLT entries: the main PLT stub is in
3082 ARM mode, so if the branch is in Thumb mode, another
3083 Thumb->ARM stub will be inserted later just before the ARM
3084 PLT stub. We don't take this extra distance into account
3085 here, because if a long branch stub is needed we'll add a
3086 Thumb->ARM one and branch directly to the ARM PLT entry;
3087 that avoids spreading offset corrections across several
3088 places. */
3089 }
3090
3091 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3092 {
3093 /* Handle cases where:
3094 - this call goes too far (different Thumb/Thumb2 max
3095 distance)
3096 - it's a Thumb->Arm call and blx is not available, or it's a
3097 Thumb->Arm branch (not bl). A stub is needed in this case,
3098 but only if this call is not through a PLT entry. Indeed,
3099 PLT stubs handle mode switching already.
3100 */
3101 if ((!thumb2
3102 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3103 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3104 || (thumb2
3105 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3106 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3107 || ((st_type != STT_ARM_TFUNC)
3108 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3109 || (r_type == R_ARM_THM_JUMP24))
3110 && !use_plt))
3111 {
3112 if (st_type == STT_ARM_TFUNC)
3113 {
3114 /* Thumb to thumb. */
3115 if (!thumb_only)
3116 {
3117 stub_type = (info->shared | globals->pic_veneer)
3118 /* PIC stubs. */
3119 ? ((globals->use_blx
3120 && (r_type ==R_ARM_THM_CALL))
3121 /* V5T and above. Stub starts with ARM code, so
3122 we must be able to switch mode before
3123 reaching it, which is only possible for 'bl'
3124 (ie R_ARM_THM_CALL relocation). */
3125 ? arm_stub_long_branch_any_thumb_pic
3126 /* On V4T, use Thumb code only. */
3127 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3128
3129 /* non-PIC stubs. */
3130 : ((globals->use_blx
3131 && (r_type ==R_ARM_THM_CALL))
3132 /* V5T and above. */
3133 ? arm_stub_long_branch_any_any
3134 /* V4T. */
3135 : arm_stub_long_branch_v4t_thumb_thumb);
3136 }
3137 else
3138 {
3139 stub_type = (info->shared | globals->pic_veneer)
3140 /* PIC stub. */
3141 ? arm_stub_long_branch_thumb_only_pic
3142 /* non-PIC stub. */
3143 : arm_stub_long_branch_thumb_only;
3144 }
3145 }
3146 else
3147 {
3148 /* Thumb to arm. */
3149 if (sym_sec != NULL
3150 && sym_sec->owner != NULL
3151 && !INTERWORK_FLAG (sym_sec->owner))
3152 {
3153 (*_bfd_error_handler)
3154 (_("%B(%s): warning: interworking not enabled.\n"
3155 " first occurrence: %B: Thumb call to ARM"),
3156 sym_sec->owner, input_bfd, name);
3157 }
3158
3159 stub_type = (info->shared | globals->pic_veneer)
3160 /* PIC stubs. */
3161 ? ((globals->use_blx
3162 && (r_type ==R_ARM_THM_CALL))
3163 /* V5T and above. */
3164 ? arm_stub_long_branch_any_arm_pic
3165 /* V4T PIC stub. */
3166 : arm_stub_long_branch_v4t_thumb_arm_pic)
3167
3168 /* non-PIC stubs. */
3169 : ((globals->use_blx
3170 && (r_type ==R_ARM_THM_CALL))
3171 /* V5T and above. */
3172 ? arm_stub_long_branch_any_any
3173 /* V4T. */
3174 : arm_stub_long_branch_v4t_thumb_arm);
3175
3176 /* Handle v4t short branches. */
3177 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3178 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3179 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3180 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3181 }
3182 }
3183 }
3184 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3185 {
3186 if (st_type == STT_ARM_TFUNC)
3187 {
3188 /* Arm to thumb. */
3189
3190 if (sym_sec != NULL
3191 && sym_sec->owner != NULL
3192 && !INTERWORK_FLAG (sym_sec->owner))
3193 {
3194 (*_bfd_error_handler)
3195 (_("%B(%s): warning: interworking not enabled.\n"
3196 " first occurrence: %B: ARM call to Thumb"),
3197 sym_sec->owner, input_bfd, name);
3198 }
3199
3200 /* We have an extra 2 bytes of reach because of
3201 the mode change (bit 24 (H) of the BLX encoding). */
3202 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3203 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3204 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3205 || (r_type == R_ARM_JUMP24)
3206 || (r_type == R_ARM_PLT32))
3207 {
3208 stub_type = (info->shared | globals->pic_veneer)
3209 /* PIC stubs. */
3210 ? ((globals->use_blx)
3211 /* V5T and above. */
3212 ? arm_stub_long_branch_any_thumb_pic
3213 /* V4T stub. */
3214 : arm_stub_long_branch_v4t_arm_thumb_pic)
3215
3216 /* non-PIC stubs. */
3217 : ((globals->use_blx)
3218 /* V5T and above. */
3219 ? arm_stub_long_branch_any_any
3220 /* V4T. */
3221 : arm_stub_long_branch_v4t_arm_thumb);
3222 }
3223 }
3224 else
3225 {
3226 /* Arm to arm. */
3227 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3228 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3229 {
3230 stub_type = (info->shared | globals->pic_veneer)
3231 /* PIC stubs. */
3232 ? arm_stub_long_branch_any_arm_pic
3233 /* non-PIC stubs. */
3234 : arm_stub_long_branch_any_any;
3235 }
3236 }
3237 }
3238
3239 return stub_type;
3240 }
3241
3242 /* Build a name for an entry in the stub hash table. */
3243
3244 static char *
3245 elf32_arm_stub_name (const asection *input_section,
3246 const asection *sym_sec,
3247 const struct elf32_arm_link_hash_entry *hash,
3248 const Elf_Internal_Rela *rel)
3249 {
3250 char *stub_name;
3251 bfd_size_type len;
3252
3253 if (hash)
3254 {
3255 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3256 stub_name = (char *) bfd_malloc (len);
3257 if (stub_name != NULL)
3258 sprintf (stub_name, "%08x_%s+%x",
3259 input_section->id & 0xffffffff,
3260 hash->root.root.root.string,
3261 (int) rel->r_addend & 0xffffffff);
3262 }
3263 else
3264 {
3265 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3266 stub_name = (char *) bfd_malloc (len);
3267 if (stub_name != NULL)
3268 sprintf (stub_name, "%08x_%x:%x+%x",
3269 input_section->id & 0xffffffff,
3270 sym_sec->id & 0xffffffff,
3271 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3272 (int) rel->r_addend & 0xffffffff);
3273 }
3274
3275 return stub_name;
3276 }
3277
3278 /* Look up an entry in the stub hash. Stub entries are cached because
3279 creating the stub name takes a bit of time. */
3280
3281 static struct elf32_arm_stub_hash_entry *
3282 elf32_arm_get_stub_entry (const asection *input_section,
3283 const asection *sym_sec,
3284 struct elf_link_hash_entry *hash,
3285 const Elf_Internal_Rela *rel,
3286 struct elf32_arm_link_hash_table *htab)
3287 {
3288 struct elf32_arm_stub_hash_entry *stub_entry;
3289 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3290 const asection *id_sec;
3291
3292 if ((input_section->flags & SEC_CODE) == 0)
3293 return NULL;
3294
3295 /* If this input section is part of a group of sections sharing one
3296 stub section, then use the id of the first section in the group.
3297 Stub names need to include a section id, as there may well be
3298 more than one stub used to reach say, printf, and we need to
3299 distinguish between them. */
3300 id_sec = htab->stub_group[input_section->id].link_sec;
3301
3302 if (h != NULL && h->stub_cache != NULL
3303 && h->stub_cache->h == h
3304 && h->stub_cache->id_sec == id_sec)
3305 {
3306 stub_entry = h->stub_cache;
3307 }
3308 else
3309 {
3310 char *stub_name;
3311
3312 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3313 if (stub_name == NULL)
3314 return NULL;
3315
3316 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3317 stub_name, FALSE, FALSE);
3318 if (h != NULL)
3319 h->stub_cache = stub_entry;
3320
3321 free (stub_name);
3322 }
3323
3324 return stub_entry;
3325 }
3326
3327 /* Find or create a stub section. Returns a pointer to the stub section, and
3328 the section to which the stub section will be attached (in *LINK_SEC_P).
3329 LINK_SEC_P may be NULL. */
3330
3331 static asection *
3332 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3333 struct elf32_arm_link_hash_table *htab)
3334 {
3335 asection *link_sec;
3336 asection *stub_sec;
3337
3338 link_sec = htab->stub_group[section->id].link_sec;
3339 stub_sec = htab->stub_group[section->id].stub_sec;
3340 if (stub_sec == NULL)
3341 {
3342 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3343 if (stub_sec == NULL)
3344 {
3345 size_t namelen;
3346 bfd_size_type len;
3347 char *s_name;
3348
3349 namelen = strlen (link_sec->name);
3350 len = namelen + sizeof (STUB_SUFFIX);
3351 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3352 if (s_name == NULL)
3353 return NULL;
3354
3355 memcpy (s_name, link_sec->name, namelen);
3356 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3357 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3358 if (stub_sec == NULL)
3359 return NULL;
3360 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3361 }
3362 htab->stub_group[section->id].stub_sec = stub_sec;
3363 }
3364
3365 if (link_sec_p)
3366 *link_sec_p = link_sec;
3367
3368 return stub_sec;
3369 }
3370
3371 /* Add a new stub entry to the stub hash. Not all fields of the new
3372 stub entry are initialised. */
3373
3374 static struct elf32_arm_stub_hash_entry *
3375 elf32_arm_add_stub (const char *stub_name,
3376 asection *section,
3377 struct elf32_arm_link_hash_table *htab)
3378 {
3379 asection *link_sec;
3380 asection *stub_sec;
3381 struct elf32_arm_stub_hash_entry *stub_entry;
3382
3383 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3384 if (stub_sec == NULL)
3385 return NULL;
3386
3387 /* Enter this entry into the linker stub hash table. */
3388 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3389 TRUE, FALSE);
3390 if (stub_entry == NULL)
3391 {
3392 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3393 section->owner,
3394 stub_name);
3395 return NULL;
3396 }
3397
3398 stub_entry->stub_sec = stub_sec;
3399 stub_entry->stub_offset = 0;
3400 stub_entry->id_sec = link_sec;
3401
3402 return stub_entry;
3403 }
3404
3405 /* Store an Arm insn into an output section not processed by
3406 elf32_arm_write_section. */
3407
3408 static void
3409 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3410 bfd * output_bfd, bfd_vma val, void * ptr)
3411 {
3412 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3413 bfd_putl32 (val, ptr);
3414 else
3415 bfd_putb32 (val, ptr);
3416 }
3417
3418 /* Store a 16-bit Thumb insn into an output section not processed by
3419 elf32_arm_write_section. */
3420
3421 static void
3422 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3423 bfd * output_bfd, bfd_vma val, void * ptr)
3424 {
3425 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3426 bfd_putl16 (val, ptr);
3427 else
3428 bfd_putb16 (val, ptr);
3429 }
3430
3431 static bfd_reloc_status_type elf32_arm_final_link_relocate
3432 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3433 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3434 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3435
3436 static bfd_boolean
3437 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3438 void * in_arg)
3439 {
3440 #define MAXRELOCS 2
3441 struct elf32_arm_stub_hash_entry *stub_entry;
3442 struct bfd_link_info *info;
3443 struct elf32_arm_link_hash_table *htab;
3444 asection *stub_sec;
3445 bfd *stub_bfd;
3446 bfd_vma stub_addr;
3447 bfd_byte *loc;
3448 bfd_vma sym_value;
3449 int template_size;
3450 int size;
3451 const insn_sequence *template_sequence;
3452 int i;
3453 struct elf32_arm_link_hash_table * globals;
3454 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3455 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3456 int nrelocs = 0;
3457
3458 /* Massage our args to the form they really have. */
3459 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3460 info = (struct bfd_link_info *) in_arg;
3461
3462 globals = elf32_arm_hash_table (info);
3463
3464 htab = elf32_arm_hash_table (info);
3465 stub_sec = stub_entry->stub_sec;
3466
3467 if ((htab->fix_cortex_a8 < 0)
3468 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3469 /* We have to do the a8 fixes last, as they are less aligned than
3470 the other veneers. */
3471 return TRUE;
3472
3473 /* Make a note of the offset within the stubs for this entry. */
3474 stub_entry->stub_offset = stub_sec->size;
3475 loc = stub_sec->contents + stub_entry->stub_offset;
3476
3477 stub_bfd = stub_sec->owner;
3478
3479 /* This is the address of the start of the stub. */
3480 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3481 + stub_entry->stub_offset;
3482
3483 /* This is the address of the stub destination. */
3484 sym_value = (stub_entry->target_value
3485 + stub_entry->target_section->output_offset
3486 + stub_entry->target_section->output_section->vma);
3487
3488 template_sequence = stub_entry->stub_template;
3489 template_size = stub_entry->stub_template_size;
3490
3491 size = 0;
3492 for (i = 0; i < template_size; i++)
3493 {
3494 switch (template_sequence[i].type)
3495 {
3496 case THUMB16_TYPE:
3497 {
3498 bfd_vma data = (bfd_vma) template_sequence[i].data;
3499 if (template_sequence[i].reloc_addend != 0)
3500 {
3501 /* We've borrowed the reloc_addend field to mean we should
3502 insert a condition code into this (Thumb-1 branch)
3503 instruction. See THUMB16_BCOND_INSN. */
3504 BFD_ASSERT ((data & 0xff00) == 0xd000);
3505 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3506 }
3507 put_thumb_insn (globals, stub_bfd, data, loc + size);
3508 size += 2;
3509 }
3510 break;
3511
3512 case THUMB32_TYPE:
3513 put_thumb_insn (globals, stub_bfd,
3514 (template_sequence[i].data >> 16) & 0xffff,
3515 loc + size);
3516 put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
3517 loc + size + 2);
3518 if (template_sequence[i].r_type != R_ARM_NONE)
3519 {
3520 stub_reloc_idx[nrelocs] = i;
3521 stub_reloc_offset[nrelocs++] = size;
3522 }
3523 size += 4;
3524 break;
3525
3526 case ARM_TYPE:
3527 put_arm_insn (globals, stub_bfd, template_sequence[i].data,
3528 loc + size);
3529 /* Handle cases where the target is encoded within the
3530 instruction. */
3531 if (template_sequence[i].r_type == R_ARM_JUMP24)
3532 {
3533 stub_reloc_idx[nrelocs] = i;
3534 stub_reloc_offset[nrelocs++] = size;
3535 }
3536 size += 4;
3537 break;
3538
3539 case DATA_TYPE:
3540 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3541 stub_reloc_idx[nrelocs] = i;
3542 stub_reloc_offset[nrelocs++] = size;
3543 size += 4;
3544 break;
3545
3546 default:
3547 BFD_FAIL ();
3548 return FALSE;
3549 }
3550 }
3551
3552 stub_sec->size += size;
3553
3554 /* Stub size has already been computed in arm_size_one_stub. Check
3555 consistency. */
3556 BFD_ASSERT (size == stub_entry->stub_size);
3557
3558 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3559 if (stub_entry->st_type == STT_ARM_TFUNC)
3560 sym_value |= 1;
3561
3562 /* Assume there are at least one and at most MAXRELOCS entries to relocate
3563 in each stub. */
3564 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3565
3566 for (i = 0; i < nrelocs; i++)
3567 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3568 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3569 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3570 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3571 {
3572 Elf_Internal_Rela rel;
3573 bfd_boolean unresolved_reloc;
3574 char *error_message;
3575 int sym_flags
3576 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3577 ? STT_ARM_TFUNC : 0;
3578 bfd_vma points_to = sym_value + stub_entry->target_addend;
3579
3580 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3581 rel.r_info = ELF32_R_INFO (0,
3582 template_sequence[stub_reloc_idx[i]].r_type);
3583 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3584
3585 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3586 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3587 template should refer back to the instruction after the original
3588 branch. */
3589 points_to = sym_value;
3590
3591 /* There may be unintended consequences if this is not true. */
3592 BFD_ASSERT (stub_entry->h == NULL);
3593
3594 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3595 properly. We should probably use elf32_arm_final_link_relocate
3596 unconditionally, rather than only for the relocations listed in the
3597 enclosing conditional, for the sake of consistency. */
3598 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3599 (template_sequence[stub_reloc_idx[i]].r_type),
3600 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3601 points_to, info, stub_entry->target_section, "", sym_flags,
3602 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3603 &error_message);
3604 }
3605 else
3606 {
3607 _bfd_final_link_relocate (elf32_arm_howto_from_type
3608 (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3609 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3610 sym_value + stub_entry->target_addend,
3611 template_sequence[stub_reloc_idx[i]].reloc_addend);
3612 }
3613
3614 return TRUE;
3615 #undef MAXRELOCS
3616 }
3617
3618 /* Calculate the template, template size and instruction size for a stub.
3619 Return value is the instruction size. */
3620
3621 static unsigned int
3622 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3623 const insn_sequence **stub_template,
3624 int *stub_template_size)
3625 {
3626 const insn_sequence *template_sequence = NULL;
3627 int template_size = 0, i;
3628 unsigned int size;
3629
3630 template_sequence = stub_definitions[stub_type].template_sequence;
3631 template_size = stub_definitions[stub_type].template_size;
3632
3633 size = 0;
3634 for (i = 0; i < template_size; i++)
3635 {
3636 switch (template_sequence[i].type)
3637 {
3638 case THUMB16_TYPE:
3639 size += 2;
3640 break;
3641
3642 case ARM_TYPE:
3643 case THUMB32_TYPE:
3644 case DATA_TYPE:
3645 size += 4;
3646 break;
3647
3648 default:
3649 BFD_FAIL ();
3650 return 0;
3651 }
3652 }
3653
3654 if (stub_template)
3655 *stub_template = template_sequence;
3656
3657 if (stub_template_size)
3658 *stub_template_size = template_size;
3659
3660 return size;
3661 }
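/* For example, a (hypothetical) template made up of one THUMB16 entry, one
   THUMB32 entry and one DATA entry would be sized by the loop above as
   2 + 4 + 4 = 10 bytes.  */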
3662
3663 /* As above, but don't actually build the stub. Just bump offset so
3664 we know stub section sizes. */
3665
3666 static bfd_boolean
3667 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3668 void * in_arg)
3669 {
3670 struct elf32_arm_stub_hash_entry *stub_entry;
3671 struct elf32_arm_link_hash_table *htab;
3672 const insn_sequence *template_sequence;
3673 int template_size, size;
3674
3675 /* Massage our args to the form they really have. */
3676 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3677 htab = (struct elf32_arm_link_hash_table *) in_arg;
3678
3679 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3680 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3681
3682 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3683 &template_size);
3684
3685 stub_entry->stub_size = size;
3686 stub_entry->stub_template = template_sequence;
3687 stub_entry->stub_template_size = template_size;
3688
3689 size = (size + 7) & ~7;
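  /* Note that the sizing pass rounds each stub's allocation up to a multiple
     of 8 bytes here, whereas arm_build_one_stub packs the stubs back to back,
     so slightly more space may be reserved than is eventually used.  */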
3690 stub_entry->stub_sec->size += size;
3691
3692 return TRUE;
3693 }
3694
3695 /* External entry points for sizing and building linker stubs. */
3696
3697 /* Set up various things so that we can make a list of input sections
3698 for each output section included in the link. Returns -1 on error,
3699 0 when no stubs will be needed, and 1 on success. */
3700
3701 int
3702 elf32_arm_setup_section_lists (bfd *output_bfd,
3703 struct bfd_link_info *info)
3704 {
3705 bfd *input_bfd;
3706 unsigned int bfd_count;
3707 int top_id, top_index;
3708 asection *section;
3709 asection **input_list, **list;
3710 bfd_size_type amt;
3711 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3712
3713 if (! is_elf_hash_table (htab))
3714 return 0;
3715
3716 /* Count the number of input BFDs and find the top input section id. */
3717 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3718 input_bfd != NULL;
3719 input_bfd = input_bfd->link_next)
3720 {
3721 bfd_count += 1;
3722 for (section = input_bfd->sections;
3723 section != NULL;
3724 section = section->next)
3725 {
3726 if (top_id < section->id)
3727 top_id = section->id;
3728 }
3729 }
3730 htab->bfd_count = bfd_count;
3731
3732 amt = sizeof (struct map_stub) * (top_id + 1);
3733 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3734 if (htab->stub_group == NULL)
3735 return -1;
3736
3737 /* We can't use output_bfd->section_count here to find the top output
3738 section index as some sections may have been removed, and
3739 _bfd_strip_section_from_output doesn't renumber the indices. */
3740 for (section = output_bfd->sections, top_index = 0;
3741 section != NULL;
3742 section = section->next)
3743 {
3744 if (top_index < section->index)
3745 top_index = section->index;
3746 }
3747
3748 htab->top_index = top_index;
3749 amt = sizeof (asection *) * (top_index + 1);
3750 input_list = (asection **) bfd_malloc (amt);
3751 htab->input_list = input_list;
3752 if (input_list == NULL)
3753 return -1;
3754
3755 /* For sections we aren't interested in, mark their entries with a
3756 value we can check later. */
3757 list = input_list + top_index;
3758 do
3759 *list = bfd_abs_section_ptr;
3760 while (list-- != input_list);
3761
3762 for (section = output_bfd->sections;
3763 section != NULL;
3764 section = section->next)
3765 {
3766 if ((section->flags & SEC_CODE) != 0)
3767 input_list[section->index] = NULL;
3768 }
3769
3770 return 1;
3771 }
3772
3773 /* The linker repeatedly calls this function for each input section,
3774 in the order that input sections are linked into output sections.
3775 Build lists of input sections to determine groupings between which
3776 we may insert linker stubs. */
3777
3778 void
3779 elf32_arm_next_input_section (struct bfd_link_info *info,
3780 asection *isec)
3781 {
3782 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3783
3784 if (isec->output_section->index <= htab->top_index)
3785 {
3786 asection **list = htab->input_list + isec->output_section->index;
3787
3788 if (*list != bfd_abs_section_ptr)
3789 {
3790 /* Steal the link_sec pointer for our list. */
3791 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3792 /* This happens to make the list in reverse order,
3793 which we reverse later. */
3794 PREV_SEC (isec) = *list;
3795 *list = isec;
3796 }
3797 }
3798 }
3799
3800 /* See whether we can group stub sections together. Grouping stub
3801 sections may result in fewer stubs. More importantly, we need to
3802 put all .init* and .fini* stubs at the end of the .init or
3803 .fini output sections respectively, because glibc splits the
3804 _init and _fini functions into multiple parts. Putting a stub in
3805 the middle of a function is not a good idea. */
3806
3807 static void
3808 group_sections (struct elf32_arm_link_hash_table *htab,
3809 bfd_size_type stub_group_size,
3810 bfd_boolean stubs_always_after_branch)
3811 {
3812 asection **list = htab->input_list;
3813
3814 do
3815 {
3816 asection *tail = *list;
3817 asection *head;
3818
3819 if (tail == bfd_abs_section_ptr)
3820 continue;
3821
3822 /* Reverse the list: we must avoid placing stubs at the
3823 beginning of the section because the beginning of the text
3824 section may be required for an interrupt vector in bare metal
3825 code. */
3826 #define NEXT_SEC PREV_SEC
3827 head = NULL;
3828 while (tail != NULL)
3829 {
3830 /* Pop from tail. */
3831 asection *item = tail;
3832 tail = PREV_SEC (item);
3833
3834 /* Push on head. */
3835 NEXT_SEC (item) = head;
3836 head = item;
3837 }
3838
3839 while (head != NULL)
3840 {
3841 asection *curr;
3842 asection *next;
3843 bfd_vma stub_group_start = head->output_offset;
3844 bfd_vma end_of_next;
3845
3846 curr = head;
3847 while (NEXT_SEC (curr) != NULL)
3848 {
3849 next = NEXT_SEC (curr);
3850 end_of_next = next->output_offset + next->size;
3851 if (end_of_next - stub_group_start >= stub_group_size)
3852 /* End of NEXT is too far from start, so stop. */
3853 break;
3854 /* Add NEXT to the group. */
3855 curr = next;
3856 }
3857
3858 /* OK, the size from the start to the start of CURR is less
3859 than stub_group_size and thus can be handled by one stub
3860 section. (Or the head section is itself larger than
3861 stub_group_size, in which case we may be toast.)
3862 We should really be keeping track of the total size of
3863 stubs added here, as stubs contribute to the final output
3864 section size. */
3865 do
3866 {
3867 next = NEXT_SEC (head);
3868 /* Set up this stub group. */
3869 htab->stub_group[head->id].link_sec = curr;
3870 }
3871 while (head != curr && (head = next) != NULL);
3872
3873 /* But wait, there's more! Input sections up to stub_group_size
3874 bytes after the stub section can be handled by it too. */
3875 if (!stubs_always_after_branch)
3876 {
3877 stub_group_start = curr->output_offset + curr->size;
3878
3879 while (next != NULL)
3880 {
3881 end_of_next = next->output_offset + next->size;
3882 if (end_of_next - stub_group_start >= stub_group_size)
3883 /* End of NEXT is too far from stubs, so stop. */
3884 break;
3885 /* Add NEXT to the stub group. */
3886 head = next;
3887 next = NEXT_SEC (head);
3888 htab->stub_group[head->id].link_sec = curr;
3889 }
3890 }
3891 head = next;
3892 }
3893 }
3894 while (list++ != htab->input_list + htab->top_index);
3895
3896 free (htab->input_list);
3897 #undef PREV_SEC
3898 #undef NEXT_SEC
3899 }
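/* As an illustration, with hypothetical numbers: given stub_group_size of
   0x3000 and input sections A (offset 0, size 0x1000), B (0x1000, 0x1000)
   and C (0x2000, 0x2000), the first loop above groups A and B with
   link_sec = B, since C would end at 0x4000, which is 0x3000 or more from
   the group start.  If stubs are not forced to be after the branch, the
   second loop also attaches C to the same group, because C ends within
   0x3000 bytes of the end of B.  */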
3900
3901 /* Comparison function for sorting/searching relocations relating to the
3902 Cortex-A8 erratum fix. */
3903
3904 static int
3905 a8_reloc_compare (const void *a, const void *b)
3906 {
3907 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3908 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3909
3910 if (ra->from < rb->from)
3911 return -1;
3912 else if (ra->from > rb->from)
3913 return 1;
3914 else
3915 return 0;
3916 }
3917
3918 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3919 const char *, char **);
3920
3921 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3922 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3923 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3924 otherwise. */
3925
3926 static bfd_boolean
3927 cortex_a8_erratum_scan (bfd *input_bfd,
3928 struct bfd_link_info *info,
3929 struct a8_erratum_fix **a8_fixes_p,
3930 unsigned int *num_a8_fixes_p,
3931 unsigned int *a8_fix_table_size_p,
3932 struct a8_erratum_reloc *a8_relocs,
3933 unsigned int num_a8_relocs,
3934 unsigned prev_num_a8_fixes,
3935 bfd_boolean *stub_changed_p)
3936 {
3937 asection *section;
3938 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3939 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3940 unsigned int num_a8_fixes = *num_a8_fixes_p;
3941 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3942
3943 for (section = input_bfd->sections;
3944 section != NULL;
3945 section = section->next)
3946 {
3947 bfd_byte *contents = NULL;
3948 struct _arm_elf_section_data *sec_data;
3949 unsigned int span;
3950 bfd_vma base_vma;
3951
3952 if (elf_section_type (section) != SHT_PROGBITS
3953 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3954 || (section->flags & SEC_EXCLUDE) != 0
3955 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3956 || (section->output_section == bfd_abs_section_ptr))
3957 continue;
3958
3959 base_vma = section->output_section->vma + section->output_offset;
3960
3961 if (elf_section_data (section)->this_hdr.contents != NULL)
3962 contents = elf_section_data (section)->this_hdr.contents;
3963 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3964 return TRUE;
3965
3966 sec_data = elf32_arm_section_data (section);
3967
3968 for (span = 0; span < sec_data->mapcount; span++)
3969 {
3970 unsigned int span_start = sec_data->map[span].vma;
3971 unsigned int span_end = (span == sec_data->mapcount - 1)
3972 ? section->size : sec_data->map[span + 1].vma;
3973 unsigned int i;
3974 char span_type = sec_data->map[span].type;
3975 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3976
3977 if (span_type != 't')
3978 continue;
3979
3980 /* Span is entirely within a single 4KB region: skip scanning. */
3981 if (((base_vma + span_start) & ~0xfff)
3982 == ((base_vma + span_end) & ~0xfff))
3983 continue;
3984
3985 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3986
3987 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3988 * The branch target is in the same 4KB region as the
3989 first half of the branch.
3990 * The instruction before the branch is a 32-bit
3991 non-branch instruction. */
3992 for (i = span_start; i < span_end;)
3993 {
3994 unsigned int insn = bfd_getl16 (&contents[i]);
3995 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3996 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3997
3998 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3999 insn_32bit = TRUE;
4000
4001 if (insn_32bit)
4002 {
4003 /* Load the rest of the insn (in manual-friendly order). */
4004 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4005
4006 /* Encoding T4: B<c>.W. */
4007 is_b = (insn & 0xf800d000) == 0xf0009000;
4008 /* Encoding T1: BL<c>.W. */
4009 is_bl = (insn & 0xf800d000) == 0xf000d000;
4010 /* Encoding T2: BLX<c>.W. */
4011 is_blx = (insn & 0xf800d000) == 0xf000c000;
4012 /* Encoding T3: B<c>.W (not permitted in IT block). */
4013 is_bcc = (insn & 0xf800d000) == 0xf0008000
4014 && (insn & 0x07f00000) != 0x03800000;
4015 }
4016
4017 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4018
4019 if (((base_vma + i) & 0xfff) == 0xffe
4020 && insn_32bit
4021 && is_32bit_branch
4022 && last_was_32bit
4023 && ! last_was_branch)
4024 {
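  /* ((base_vma + i) & 0xfff) == 0xffe means the first halfword of this
     32-bit branch occupies the last two bytes of a 4KB region, i.e. the
     branch straddles a 4KB boundary.  */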
4025 bfd_signed_vma offset;
4026 bfd_boolean force_target_arm = FALSE;
4027 bfd_boolean force_target_thumb = FALSE;
4028 bfd_vma target;
4029 enum elf32_arm_stub_type stub_type = arm_stub_none;
4030 struct a8_erratum_reloc key, *found;
4031
4032 key.from = base_vma + i;
4033 found = (struct a8_erratum_reloc *)
4034 bsearch (&key, a8_relocs, num_a8_relocs,
4035 sizeof (struct a8_erratum_reloc),
4036 &a8_reloc_compare);
4037
4038 if (found)
4039 {
4040 char *error_message = NULL;
4041 struct elf_link_hash_entry *entry;
4042
4043 /* We don't care about the error returned from this
4044 function, only whether or not there is glue. */
4045 entry = find_thumb_glue (info, found->sym_name,
4046 &error_message);
4047
4048 if (entry)
4049 found->non_a8_stub = TRUE;
4050
4051 if (found->r_type == R_ARM_THM_CALL
4052 && found->st_type != STT_ARM_TFUNC)
4053 force_target_arm = TRUE;
4054 else if (found->r_type == R_ARM_THM_CALL
4055 && found->st_type == STT_ARM_TFUNC)
4056 force_target_thumb = TRUE;
4057 }
4058
4059 /* Check if we have an offending branch instruction. */
4060
4061 if (found && found->non_a8_stub)
4062 /* We've already made a stub for this instruction, e.g.
4063 it's a long branch or a Thumb->ARM stub. Assume that
4064 stub will suffice to work around the A8 erratum (see
4065 setting of always_after_branch above). */
4066 ;
4067 else if (is_bcc)
4068 {
4069 offset = (insn & 0x7ff) << 1;
4070 offset |= (insn & 0x3f0000) >> 4;
4071 offset |= (insn & 0x2000) ? 0x40000 : 0;
4072 offset |= (insn & 0x800) ? 0x80000 : 0;
4073 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4074 if (offset & 0x100000)
4075 offset |= ~ ((bfd_signed_vma) 0xfffff);
4076 stub_type = arm_stub_a8_veneer_b_cond;
4077 }
4078 else if (is_b || is_bl || is_blx)
4079 {
4080 int s = (insn & 0x4000000) != 0;
4081 int j1 = (insn & 0x2000) != 0;
4082 int j2 = (insn & 0x800) != 0;
4083 int i1 = !(j1 ^ s);
4084 int i2 = !(j2 ^ s);
4085
4086 offset = (insn & 0x7ff) << 1;
4087 offset |= (insn & 0x3ff0000) >> 4;
4088 offset |= i2 << 22;
4089 offset |= i1 << 23;
4090 offset |= s << 24;
4091 if (offset & 0x1000000)
4092 offset |= ~ ((bfd_signed_vma) 0xffffff);
4093
4094 if (is_blx)
4095 offset &= ~ ((bfd_signed_vma) 3);
4096
4097 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4098 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4099 }
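  /* For example, the (hypothetical) encoding 0xf000b880 is B.W with S = 0,
     imm10 = 0, J1 = J2 = 1 (hence I1 = I2 = 0) and imm11 = 0x80, so the
     code above computes offset = 0x80 << 1 = +0x100 and the branch target
     is pc_for_insn + 0x100, where pc_for_insn is the address of the
     instruction plus 4.  */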
4100
4101 if (stub_type != arm_stub_none)
4102 {
4103 bfd_vma pc_for_insn = base_vma + i + 4;
4104
4105 /* The original instruction is a BL, but the target is
4106 an ARM instruction. If we were not making a stub,
4107 the BL would have been converted to a BLX. Use the
4108 BLX stub instead in that case. */
4109 if (htab->use_blx && force_target_arm
4110 && stub_type == arm_stub_a8_veneer_bl)
4111 {
4112 stub_type = arm_stub_a8_veneer_blx;
4113 is_blx = TRUE;
4114 is_bl = FALSE;
4115 }
4116 /* Conversely, if the original instruction was
4117 BLX but the target is Thumb mode, use the BL
4118 stub. */
4119 else if (force_target_thumb
4120 && stub_type == arm_stub_a8_veneer_blx)
4121 {
4122 stub_type = arm_stub_a8_veneer_bl;
4123 is_blx = FALSE;
4124 is_bl = TRUE;
4125 }
4126
4127 if (is_blx)
4128 pc_for_insn &= ~ ((bfd_vma) 3);
4129
4130 /* If we found a relocation, use the proper destination,
4131 not the offset in the (unrelocated) instruction.
4132 Note this is always done if we switched the stub type
4133 above. */
4134 if (found)
4135 offset =
4136 (bfd_signed_vma) (found->destination - pc_for_insn);
4137
4138 target = pc_for_insn + offset;
4139
4140 /* The BLX stub is ARM-mode code. Adjust the offset to
4141 take the different PC value (+8 instead of +4) into
4142 account. */
4143 if (stub_type == arm_stub_a8_veneer_blx)
4144 offset += 4;
4145
4146 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4147 {
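  /* The erratum can only trigger when the branch target lies in the same
     4KB region as the first half of the branch, so only such branches need
     an entry in the fix table.  */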
4148 char *stub_name = NULL;
4149
4150 if (num_a8_fixes == a8_fix_table_size)
4151 {
4152 a8_fix_table_size *= 2;
4153 a8_fixes = (struct a8_erratum_fix *)
4154 bfd_realloc (a8_fixes,
4155 sizeof (struct a8_erratum_fix)
4156 * a8_fix_table_size);
4157 }
4158
4159 if (num_a8_fixes < prev_num_a8_fixes)
4160 {
4161 /* If we're doing a subsequent scan,
4162 check if we've found the same fix as
4163 before, and try and reuse the stub
4164 name. */
4165 stub_name = a8_fixes[num_a8_fixes].stub_name;
4166 if ((a8_fixes[num_a8_fixes].section != section)
4167 || (a8_fixes[num_a8_fixes].offset != i))
4168 {
4169 free (stub_name);
4170 stub_name = NULL;
4171 *stub_changed_p = TRUE;
4172 }
4173 }
4174
4175 if (!stub_name)
4176 {
4177 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4178 if (stub_name != NULL)
4179 sprintf (stub_name, "%x:%x", section->id, i);
4180 }
4181
4182 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4183 a8_fixes[num_a8_fixes].section = section;
4184 a8_fixes[num_a8_fixes].offset = i;
4185 a8_fixes[num_a8_fixes].addend = offset;
4186 a8_fixes[num_a8_fixes].orig_insn = insn;
4187 a8_fixes[num_a8_fixes].stub_name = stub_name;
4188 a8_fixes[num_a8_fixes].stub_type = stub_type;
4189
4190 num_a8_fixes++;
4191 }
4192 }
4193 }
4194
4195 i += insn_32bit ? 4 : 2;
4196 last_was_32bit = insn_32bit;
4197 last_was_branch = is_32bit_branch;
4198 }
4199 }
4200
4201 if (elf_section_data (section)->this_hdr.contents == NULL)
4202 free (contents);
4203 }
4204
4205 *a8_fixes_p = a8_fixes;
4206 *num_a8_fixes_p = num_a8_fixes;
4207 *a8_fix_table_size_p = a8_fix_table_size;
4208
4209 return FALSE;
4210 }
4211
4212 /* Determine and set the size of the stub section for a final link.
4213
4214 The basic idea here is to examine all the relocations looking for
4215 PC-relative calls to a target that is unreachable with a "bl"
4216 instruction. */
4217
4218 bfd_boolean
4219 elf32_arm_size_stubs (bfd *output_bfd,
4220 bfd *stub_bfd,
4221 struct bfd_link_info *info,
4222 bfd_signed_vma group_size,
4223 asection * (*add_stub_section) (const char *, asection *),
4224 void (*layout_sections_again) (void))
4225 {
4226 bfd_size_type stub_group_size;
4227 bfd_boolean stubs_always_after_branch;
4228 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4229 struct a8_erratum_fix *a8_fixes = NULL;
4230 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4231 struct a8_erratum_reloc *a8_relocs = NULL;
4232 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4233
4234 if (htab->fix_cortex_a8)
4235 {
4236 a8_fixes = (struct a8_erratum_fix *)
4237 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4238 a8_relocs = (struct a8_erratum_reloc *)
4239 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4240 }
4241
4242 /* Propagate mach to stub bfd, because it may not have been
4243 finalized when we created stub_bfd. */
4244 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4245 bfd_get_mach (output_bfd));
4246
4247 /* Stash our params away. */
4248 htab->stub_bfd = stub_bfd;
4249 htab->add_stub_section = add_stub_section;
4250 htab->layout_sections_again = layout_sections_again;
4251 stubs_always_after_branch = group_size < 0;
4252
4253 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4254 as the first half of a 32-bit branch straddling two 4K pages. This is a
4255 crude way of enforcing that. */
4256 if (htab->fix_cortex_a8)
4257 stubs_always_after_branch = 1;
4258
4259 if (group_size < 0)
4260 stub_group_size = -group_size;
4261 else
4262 stub_group_size = group_size;
4263
4264 if (stub_group_size == 1)
4265 {
4266 /* Default values. */
4267 /* The Thumb branch range of +-4MB has to be used as the default
4268 maximum size (a given section can contain both ARM and Thumb
4269 code, so the worst case has to be taken into account).
4270
4271 This value is 24K less than that, which allows for 2025
4272 12-byte stubs. If we exceed that, then we will fail to link.
4273 The user will have to relink with an explicit group size
4274 option. */
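  /* 4MB is 4194304 bytes; 4194304 - 4170000 = 24304, roughly 24K, which
     leaves room for about 2025 twelve-byte stubs.  */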
4275 stub_group_size = 4170000;
4276 }
4277
4278 group_sections (htab, stub_group_size, stubs_always_after_branch);
4279
4280 /* If we're applying the cortex A8 fix, we need to determine the
4281 program header size now, because we cannot change it later --
4282 that could alter section placements. Notice the A8 erratum fix
4283 ends up requiring the section addresses to remain unchanged
4284 modulo the page size. That's something we cannot represent
4285 inside BFD, and we don't want to force the section alignment to
4286 be the page size. */
4287 if (htab->fix_cortex_a8)
4288 (*htab->layout_sections_again) ();
4289
4290 while (1)
4291 {
4292 bfd *input_bfd;
4293 unsigned int bfd_indx;
4294 asection *stub_sec;
4295 bfd_boolean stub_changed = FALSE;
4296 unsigned prev_num_a8_fixes = num_a8_fixes;
4297
4298 num_a8_fixes = 0;
4299 for (input_bfd = info->input_bfds, bfd_indx = 0;
4300 input_bfd != NULL;
4301 input_bfd = input_bfd->link_next, bfd_indx++)
4302 {
4303 Elf_Internal_Shdr *symtab_hdr;
4304 asection *section;
4305 Elf_Internal_Sym *local_syms = NULL;
4306
4307 num_a8_relocs = 0;
4308
4309 /* We'll need the symbol table in a second. */
4310 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4311 if (symtab_hdr->sh_info == 0)
4312 continue;
4313
4314 /* Walk over each section attached to the input bfd. */
4315 for (section = input_bfd->sections;
4316 section != NULL;
4317 section = section->next)
4318 {
4319 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4320
4321 /* If there aren't any relocs, then there's nothing more
4322 to do. */
4323 if ((section->flags & SEC_RELOC) == 0
4324 || section->reloc_count == 0
4325 || (section->flags & SEC_CODE) == 0)
4326 continue;
4327
4328 /* If this section is a link-once section that will be
4329 discarded, then don't create any stubs. */
4330 if (section->output_section == NULL
4331 || section->output_section->owner != output_bfd)
4332 continue;
4333
4334 /* Get the relocs. */
4335 internal_relocs
4336 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4337 NULL, info->keep_memory);
4338 if (internal_relocs == NULL)
4339 goto error_ret_free_local;
4340
4341 /* Now examine each relocation. */
4342 irela = internal_relocs;
4343 irelaend = irela + section->reloc_count;
4344 for (; irela < irelaend; irela++)
4345 {
4346 unsigned int r_type, r_indx;
4347 enum elf32_arm_stub_type stub_type;
4348 struct elf32_arm_stub_hash_entry *stub_entry;
4349 asection *sym_sec;
4350 bfd_vma sym_value;
4351 bfd_vma destination;
4352 struct elf32_arm_link_hash_entry *hash;
4353 const char *sym_name;
4354 char *stub_name;
4355 const asection *id_sec;
4356 unsigned char st_type;
4357 bfd_boolean created_stub = FALSE;
4358
4359 r_type = ELF32_R_TYPE (irela->r_info);
4360 r_indx = ELF32_R_SYM (irela->r_info);
4361
4362 if (r_type >= (unsigned int) R_ARM_max)
4363 {
4364 bfd_set_error (bfd_error_bad_value);
4365 error_ret_free_internal:
4366 if (elf_section_data (section)->relocs == NULL)
4367 free (internal_relocs);
4368 goto error_ret_free_local;
4369 }
4370
4371 /* Only look for stubs on branch instructions. */
4372 if ((r_type != (unsigned int) R_ARM_CALL)
4373 && (r_type != (unsigned int) R_ARM_THM_CALL)
4374 && (r_type != (unsigned int) R_ARM_JUMP24)
4375 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4376 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4377 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4378 && (r_type != (unsigned int) R_ARM_PLT32))
4379 continue;
4380
4381 /* Now determine the call target, its name, value,
4382 section. */
4383 sym_sec = NULL;
4384 sym_value = 0;
4385 destination = 0;
4386 hash = NULL;
4387 sym_name = NULL;
4388 if (r_indx < symtab_hdr->sh_info)
4389 {
4390 /* It's a local symbol. */
4391 Elf_Internal_Sym *sym;
4392 Elf_Internal_Shdr *hdr;
4393
4394 if (local_syms == NULL)
4395 {
4396 local_syms
4397 = (Elf_Internal_Sym *) symtab_hdr->contents;
4398 if (local_syms == NULL)
4399 local_syms
4400 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4401 symtab_hdr->sh_info, 0,
4402 NULL, NULL, NULL);
4403 if (local_syms == NULL)
4404 goto error_ret_free_internal;
4405 }
4406
4407 sym = local_syms + r_indx;
4408 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4409 sym_sec = hdr->bfd_section;
4410 if (!sym_sec)
4411 /* This is an undefined symbol. It can never
4412 be resolved. */
4413 continue;
4414
4415 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4416 sym_value = sym->st_value;
4417 destination = (sym_value + irela->r_addend
4418 + sym_sec->output_offset
4419 + sym_sec->output_section->vma);
4420 st_type = ELF_ST_TYPE (sym->st_info);
4421 sym_name
4422 = bfd_elf_string_from_elf_section (input_bfd,
4423 symtab_hdr->sh_link,
4424 sym->st_name);
4425 }
4426 else
4427 {
4428 /* It's an external symbol. */
4429 int e_indx;
4430
4431 e_indx = r_indx - symtab_hdr->sh_info;
4432 hash = ((struct elf32_arm_link_hash_entry *)
4433 elf_sym_hashes (input_bfd)[e_indx]);
4434
4435 while (hash->root.root.type == bfd_link_hash_indirect
4436 || hash->root.root.type == bfd_link_hash_warning)
4437 hash = ((struct elf32_arm_link_hash_entry *)
4438 hash->root.root.u.i.link);
4439
4440 if (hash->root.root.type == bfd_link_hash_defined
4441 || hash->root.root.type == bfd_link_hash_defweak)
4442 {
4443 sym_sec = hash->root.root.u.def.section;
4444 sym_value = hash->root.root.u.def.value;
4445
4446 struct elf32_arm_link_hash_table *globals =
4447 elf32_arm_hash_table (info);
4448
4449 /* For a destination in a shared library,
4450 use the PLT stub as target address to
4451 decide whether a branch stub is
4452 needed. */
4453 if (globals->splt != NULL && hash != NULL
4454 && hash->root.plt.offset != (bfd_vma) -1)
4455 {
4456 sym_sec = globals->splt;
4457 sym_value = hash->root.plt.offset;
4458 if (sym_sec->output_section != NULL)
4459 destination = (sym_value
4460 + sym_sec->output_offset
4461 + sym_sec->output_section->vma);
4462 }
4463 else if (sym_sec->output_section != NULL)
4464 destination = (sym_value + irela->r_addend
4465 + sym_sec->output_offset
4466 + sym_sec->output_section->vma);
4467 }
4468 else if ((hash->root.root.type == bfd_link_hash_undefined)
4469 || (hash->root.root.type == bfd_link_hash_undefweak))
4470 {
4471 /* For a shared library, use the PLT stub as
4472 target address to decide whether a long
4473 branch stub is needed.
4474 For absolute code, such branches cannot be handled. */
4475 struct elf32_arm_link_hash_table *globals =
4476 elf32_arm_hash_table (info);
4477
4478 if (globals->splt != NULL && hash != NULL
4479 && hash->root.plt.offset != (bfd_vma) -1)
4480 {
4481 sym_sec = globals->splt;
4482 sym_value = hash->root.plt.offset;
4483 if (sym_sec->output_section != NULL)
4484 destination = (sym_value
4485 + sym_sec->output_offset
4486 + sym_sec->output_section->vma);
4487 }
4488 else
4489 continue;
4490 }
4491 else
4492 {
4493 bfd_set_error (bfd_error_bad_value);
4494 goto error_ret_free_internal;
4495 }
4496 st_type = ELF_ST_TYPE (hash->root.type);
4497 sym_name = hash->root.root.root.string;
4498 }
4499
4500 do
4501 {
4502 /* Determine what (if any) linker stub is needed. */
4503 stub_type = arm_type_of_stub (info, section, irela,
4504 st_type, hash,
4505 destination, sym_sec,
4506 input_bfd, sym_name);
4507 if (stub_type == arm_stub_none)
4508 break;
4509
4510 /* Support for grouping stub sections. */
4511 id_sec = htab->stub_group[section->id].link_sec;
4512
4513 /* Get the name of this stub. */
4514 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4515 irela);
4516 if (!stub_name)
4517 goto error_ret_free_internal;
4518
4519 /* We've either created a stub for this reloc already,
4520 or we are about to. */
4521 created_stub = TRUE;
4522
4523 stub_entry = arm_stub_hash_lookup
4524 (&htab->stub_hash_table, stub_name,
4525 FALSE, FALSE);
4526 if (stub_entry != NULL)
4527 {
4528 /* The proper stub has already been created. */
4529 free (stub_name);
4530 stub_entry->target_value = sym_value;
4531 break;
4532 }
4533
4534 stub_entry = elf32_arm_add_stub (stub_name, section,
4535 htab);
4536 if (stub_entry == NULL)
4537 {
4538 free (stub_name);
4539 goto error_ret_free_internal;
4540 }
4541
4542 stub_entry->target_value = sym_value;
4543 stub_entry->target_section = sym_sec;
4544 stub_entry->stub_type = stub_type;
4545 stub_entry->h = hash;
4546 stub_entry->st_type = st_type;
4547
4548 if (sym_name == NULL)
4549 sym_name = "unnamed";
4550 stub_entry->output_name = (char *)
4551 bfd_alloc (htab->stub_bfd,
4552 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4553 + strlen (sym_name));
4554 if (stub_entry->output_name == NULL)
4555 {
4556 free (stub_name);
4557 goto error_ret_free_internal;
4558 }
4559
4560 /* For historical reasons, use the existing names for
4561 ARM-to-Thumb and Thumb-to-ARM stubs. */
4562 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4563 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4564 && st_type != STT_ARM_TFUNC)
4565 sprintf (stub_entry->output_name,
4566 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4567 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4568 || (r_type == (unsigned int) R_ARM_JUMP24))
4569 && st_type == STT_ARM_TFUNC)
4570 sprintf (stub_entry->output_name,
4571 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4572 else
4573 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4574 sym_name);
4575
4576 stub_changed = TRUE;
4577 }
4578 while (0);
4579
4580 /* Look for relocations which might trigger Cortex-A8
4581 erratum. */
4582 if (htab->fix_cortex_a8
4583 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4584 || r_type == (unsigned int) R_ARM_THM_JUMP19
4585 || r_type == (unsigned int) R_ARM_THM_CALL
4586 || r_type == (unsigned int) R_ARM_THM_XPC22))
4587 {
4588 bfd_vma from = section->output_section->vma
4589 + section->output_offset
4590 + irela->r_offset;
4591
4592 if ((from & 0xfff) == 0xffe)
4593 {
4594 /* Found a candidate. Note we haven't checked the
4595 destination is within 4K here: if we do so (and
4596 don't create an entry in a8_relocs) we can't tell
4597 that a branch should have been relocated when
4598 scanning later. */
4599 if (num_a8_relocs == a8_reloc_table_size)
4600 {
4601 a8_reloc_table_size *= 2;
4602 a8_relocs = (struct a8_erratum_reloc *)
4603 bfd_realloc (a8_relocs,
4604 sizeof (struct a8_erratum_reloc)
4605 * a8_reloc_table_size);
4606 }
4607
4608 a8_relocs[num_a8_relocs].from = from;
4609 a8_relocs[num_a8_relocs].destination = destination;
4610 a8_relocs[num_a8_relocs].r_type = r_type;
4611 a8_relocs[num_a8_relocs].st_type = st_type;
4612 a8_relocs[num_a8_relocs].sym_name = sym_name;
4613 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4614
4615 num_a8_relocs++;
4616 }
4617 }
4618 }
4619
4620 /* We're done with the internal relocs, free them. */
4621 if (elf_section_data (section)->relocs == NULL)
4622 free (internal_relocs);
4623 }
4624
4625 if (htab->fix_cortex_a8)
4626 {
4627 /* Sort relocs which might apply to Cortex-A8 erratum. */
4628 qsort (a8_relocs, num_a8_relocs,
4629 sizeof (struct a8_erratum_reloc),
4630 &a8_reloc_compare);
4631
4632 /* Scan for branches which might trigger Cortex-A8 erratum. */
4633 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4634 &num_a8_fixes, &a8_fix_table_size,
4635 a8_relocs, num_a8_relocs,
4636 prev_num_a8_fixes, &stub_changed)
4637 != 0)
4638 goto error_ret_free_local;
4639 }
4640 }
4641
4642 if (prev_num_a8_fixes != num_a8_fixes)
4643 stub_changed = TRUE;
4644
4645 if (!stub_changed)
4646 break;
4647
4648 /* OK, we've added some stubs. Find out the new size of the
4649 stub sections. */
4650 for (stub_sec = htab->stub_bfd->sections;
4651 stub_sec != NULL;
4652 stub_sec = stub_sec->next)
4653 {
4654 /* Ignore non-stub sections. */
4655 if (!strstr (stub_sec->name, STUB_SUFFIX))
4656 continue;
4657
4658 stub_sec->size = 0;
4659 }
4660
4661 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4662
4663 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4664 if (htab->fix_cortex_a8)
4665 for (i = 0; i < num_a8_fixes; i++)
4666 {
4667 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4668 a8_fixes[i].section, htab);
4669
4670 if (stub_sec == NULL)
4671 goto error_ret_free_local;
4672
4673 stub_sec->size
4674 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4675 NULL);
4676 }
4677
4678
4679 /* Ask the linker to do its stuff. */
4680 (*htab->layout_sections_again) ();
4681 }
4682
4683 /* Add stubs for Cortex-A8 erratum fixes now. */
4684 if (htab->fix_cortex_a8)
4685 {
4686 for (i = 0; i < num_a8_fixes; i++)
4687 {
4688 struct elf32_arm_stub_hash_entry *stub_entry;
4689 char *stub_name = a8_fixes[i].stub_name;
4690 asection *section = a8_fixes[i].section;
4691 unsigned int section_id = a8_fixes[i].section->id;
4692 asection *link_sec = htab->stub_group[section_id].link_sec;
4693 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4694 const insn_sequence *template_sequence;
4695 int template_size, size = 0;
4696
4697 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4698 TRUE, FALSE);
4699 if (stub_entry == NULL)
4700 {
4701 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4702 section->owner,
4703 stub_name);
4704 return FALSE;
4705 }
4706
4707 stub_entry->stub_sec = stub_sec;
4708 stub_entry->stub_offset = 0;
4709 stub_entry->id_sec = link_sec;
4710 stub_entry->stub_type = a8_fixes[i].stub_type;
4711 stub_entry->target_section = a8_fixes[i].section;
4712 stub_entry->target_value = a8_fixes[i].offset;
4713 stub_entry->target_addend = a8_fixes[i].addend;
4714 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4715 stub_entry->st_type = STT_ARM_TFUNC;
4716
4717 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4718 &template_sequence,
4719 &template_size);
4720
4721 stub_entry->stub_size = size;
4722 stub_entry->stub_template = template_sequence;
4723 stub_entry->stub_template_size = template_size;
4724 }
4725
4726 /* Stash the Cortex-A8 erratum fix array for use later in
4727 elf32_arm_write_section(). */
4728 htab->a8_erratum_fixes = a8_fixes;
4729 htab->num_a8_erratum_fixes = num_a8_fixes;
4730 }
4731 else
4732 {
4733 htab->a8_erratum_fixes = NULL;
4734 htab->num_a8_erratum_fixes = 0;
4735 }
4736 return TRUE;
4737
4738 error_ret_free_local:
4739 return FALSE;
4740 }
4741
4742 /* Build all the stubs associated with the current output file. The
4743 stubs are kept in a hash table attached to the main linker hash
4744 table. We also set up the .plt entries for statically linked PIC
4745 functions here. This function is called via arm_elf_finish in the
4746 linker. */
4747
4748 bfd_boolean
4749 elf32_arm_build_stubs (struct bfd_link_info *info)
4750 {
4751 asection *stub_sec;
4752 struct bfd_hash_table *table;
4753 struct elf32_arm_link_hash_table *htab;
4754
4755 htab = elf32_arm_hash_table (info);
4756
4757 for (stub_sec = htab->stub_bfd->sections;
4758 stub_sec != NULL;
4759 stub_sec = stub_sec->next)
4760 {
4761 bfd_size_type size;
4762
4763 /* Ignore non-stub sections. */
4764 if (!strstr (stub_sec->name, STUB_SUFFIX))
4765 continue;
4766
4767 /* Allocate memory to hold the linker stubs. */
4768 size = stub_sec->size;
4769 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4770 if (stub_sec->contents == NULL && size != 0)
4771 return FALSE;
4772 stub_sec->size = 0;
4773 }
4774
4775 /* Build the stubs as directed by the stub hash table. */
4776 table = &htab->stub_hash_table;
4777 bfd_hash_traverse (table, arm_build_one_stub, info);
4778 if (htab->fix_cortex_a8)
4779 {
4780 /* Place the cortex a8 stubs last. */
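  /* Making fix_cortex_a8 negative tells arm_build_one_stub, via its
     "fix_cortex_a8 < 0" test, to emit only the Cortex-A8 veneers on this
     second traversal, having skipped them on the first.  */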
4781 htab->fix_cortex_a8 = -1;
4782 bfd_hash_traverse (table, arm_build_one_stub, info);
4783 }
4784
4785 return TRUE;
4786 }
4787
4788 /* Locate the Thumb encoded calling stub for NAME. */
4789
4790 static struct elf_link_hash_entry *
4791 find_thumb_glue (struct bfd_link_info *link_info,
4792 const char *name,
4793 char **error_message)
4794 {
4795 char *tmp_name;
4796 struct elf_link_hash_entry *hash;
4797 struct elf32_arm_link_hash_table *hash_table;
4798
4799 /* We need a pointer to the armelf specific hash table. */
4800 hash_table = elf32_arm_hash_table (link_info);
4801
4802 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4803 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4804
4805 BFD_ASSERT (tmp_name);
4806
4807 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4808
4809 hash = elf_link_hash_lookup
4810 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4811
4812 if (hash == NULL
4813 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4814 tmp_name, name) == -1)
4815 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4816
4817 free (tmp_name);
4818
4819 return hash;
4820 }
4821
4822 /* Locate the ARM encoded calling stub for NAME. */
4823
4824 static struct elf_link_hash_entry *
4825 find_arm_glue (struct bfd_link_info *link_info,
4826 const char *name,
4827 char **error_message)
4828 {
4829 char *tmp_name;
4830 struct elf_link_hash_entry *myh;
4831 struct elf32_arm_link_hash_table *hash_table;
4832
4833 /* We need a pointer to the elfarm specific hash table. */
4834 hash_table = elf32_arm_hash_table (link_info);
4835
4836 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4837 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4838
4839 BFD_ASSERT (tmp_name);
4840
4841 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4842
4843 myh = elf_link_hash_lookup
4844 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4845
4846 if (myh == NULL
4847 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4848 tmp_name, name) == -1)
4849 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4850
4851 free (tmp_name);
4852
4853 return myh;
4854 }
4855
4856 /* ARM->Thumb glue (static images):
4857
4858 .arm
4859 __func_from_arm:
4860 ldr r12, __func_addr
4861 bx r12
4862 __func_addr:
4863 .word func @ behave as if you saw an ARM_32 reloc.
4864
4865 (v5t static images)
4866 .arm
4867 __func_from_arm:
4868 ldr pc, __func_addr
4869 __func_addr:
4870 .word func @ behave as if you saw an ARM_32 reloc.
4871
4872 (relocatable images)
4873 .arm
4874 __func_from_arm:
4875 ldr r12, __func_offset
4876 add r12, r12, pc
4877 bx r12
4878 __func_offset:
4879 .word func - . */
4880
4881 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4882 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4883 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4884 static const insn32 a2t3_func_addr_insn = 0x00000001;
4885
4886 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4887 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4888 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4889
4890 #define ARM2THUMB_PIC_GLUE_SIZE 16
4891 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4892 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4893 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4894
4895 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4896
4897 .thumb .thumb
4898 .align 2 .align 2
4899 __func_from_thumb: __func_from_thumb:
4900 bx pc push {r6, lr}
4901 nop ldr r6, __func_addr
4902 .arm mov lr, pc
4903 b func bx r6
4904 .arm
4905 ;; back_to_thumb
4906 ldmia r13! {r6, lr}
4907 bx lr
4908 __func_addr:
4909 .word func */
4910
4911 #define THUMB2ARM_GLUE_SIZE 8
4912 static const insn16 t2a1_bx_pc_insn = 0x4778;
4913 static const insn16 t2a2_noop_insn = 0x46c0;
4914 static const insn32 t2a3_b_insn = 0xea000000;
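/* 0x4778 is "bx pc", 0x46c0 is a Thumb nop ("mov r8, r8") and 0xea000000
   is an ARM "b" instruction with a zero offset field.  */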
4915
4916 #define VFP11_ERRATUM_VENEER_SIZE 8
4917
4918 #define ARM_BX_VENEER_SIZE 12
4919 static const insn32 armbx1_tst_insn = 0xe3100001;
4920 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4921 static const insn32 armbx3_bx_insn = 0xe12fff10;
4922
4923 #ifndef ELFARM_NABI_C_INCLUDED
4924 static void
4925 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4926 {
4927 asection * s;
4928 bfd_byte * contents;
4929
4930 if (size == 0)
4931 {
4932 /* Do not include empty glue sections in the output. */
4933 if (abfd != NULL)
4934 {
4935 s = bfd_get_section_by_name (abfd, name);
4936 if (s != NULL)
4937 s->flags |= SEC_EXCLUDE;
4938 }
4939 return;
4940 }
4941
4942 BFD_ASSERT (abfd != NULL);
4943
4944 s = bfd_get_section_by_name (abfd, name);
4945 BFD_ASSERT (s != NULL);
4946
4947 contents = (bfd_byte *) bfd_alloc (abfd, size);
4948
4949 BFD_ASSERT (s->size == size);
4950 s->contents = contents;
4951 }
4952
4953 bfd_boolean
4954 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4955 {
4956 struct elf32_arm_link_hash_table * globals;
4957
4958 globals = elf32_arm_hash_table (info);
4959 BFD_ASSERT (globals != NULL);
4960
4961 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4962 globals->arm_glue_size,
4963 ARM2THUMB_GLUE_SECTION_NAME);
4964
4965 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4966 globals->thumb_glue_size,
4967 THUMB2ARM_GLUE_SECTION_NAME);
4968
4969 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4970 globals->vfp11_erratum_glue_size,
4971 VFP11_ERRATUM_VENEER_SECTION_NAME);
4972
4973 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4974 globals->bx_glue_size,
4975 ARM_BX_GLUE_SECTION_NAME);
4976
4977 return TRUE;
4978 }
4979
4980 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4981 Returns the symbol identifying the stub. */
4982
4983 static struct elf_link_hash_entry *
4984 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4985 struct elf_link_hash_entry * h)
4986 {
4987 const char * name = h->root.root.string;
4988 asection * s;
4989 char * tmp_name;
4990 struct elf_link_hash_entry * myh;
4991 struct bfd_link_hash_entry * bh;
4992 struct elf32_arm_link_hash_table * globals;
4993 bfd_vma val;
4994 bfd_size_type size;
4995
4996 globals = elf32_arm_hash_table (link_info);
4997
4998 BFD_ASSERT (globals != NULL);
4999 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5000
5001 s = bfd_get_section_by_name
5002 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5003
5004 BFD_ASSERT (s != NULL);
5005
5006 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5007 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5008
5009 BFD_ASSERT (tmp_name);
5010
5011 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5012
5013 myh = elf_link_hash_lookup
5014 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5015
5016 if (myh != NULL)
5017 {
5018 /* We've already seen this guy. */
5019 free (tmp_name);
5020 return myh;
5021 }
5022
5023 /* The only trick here is using hash_table->arm_glue_size as the value.
5024 Even though the section isn't allocated yet, this is where we will be
5025 putting it. The +1 on the value marks that the stub has not been
5026 output yet - not that it is a Thumb function. */
5027 bh = NULL;
5028 val = globals->arm_glue_size + 1;
5029 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5030 tmp_name, BSF_GLOBAL, s, val,
5031 NULL, TRUE, FALSE, &bh);
5032
5033 myh = (struct elf_link_hash_entry *) bh;
5034 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5035 myh->forced_local = 1;
5036
5037 free (tmp_name);
5038
5039 if (link_info->shared || globals->root.is_relocatable_executable
5040 || globals->pic_veneer)
5041 size = ARM2THUMB_PIC_GLUE_SIZE;
5042 else if (globals->use_blx)
5043 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5044 else
5045 size = ARM2THUMB_STATIC_GLUE_SIZE;
5046
5047 s->size += size;
5048 globals->arm_glue_size += size;
5049
5050 return myh;
5051 }
5052
5053 /* Allocate space for ARMv4 BX veneers. */
5054
5055 static void
5056 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5057 {
5058 asection * s;
5059 struct elf32_arm_link_hash_table *globals;
5060 char *tmp_name;
5061 struct elf_link_hash_entry *myh;
5062 struct bfd_link_hash_entry *bh;
5063 bfd_vma val;
5064
5065 /* BX PC does not need a veneer. */
5066 if (reg == 15)
5067 return;
5068
5069 globals = elf32_arm_hash_table (link_info);
5070
5071 BFD_ASSERT (globals != NULL);
5072 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5073
5074 /* Check if this veneer has already been allocated. */
5075 if (globals->bx_glue_offset[reg])
5076 return;
5077
5078 s = bfd_get_section_by_name
5079 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5080
5081 BFD_ASSERT (s != NULL);
5082
5083 /* Add symbol for veneer. */
5084 tmp_name = (char *)
5085 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5086
5087 BFD_ASSERT (tmp_name);
5088
5089 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5090
5091 myh = elf_link_hash_lookup
5092 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5093
5094 BFD_ASSERT (myh == NULL);
5095
5096 bh = NULL;
5097 val = globals->bx_glue_size;
5098 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5099 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5100 NULL, TRUE, FALSE, &bh);
5101
5102 myh = (struct elf_link_hash_entry *) bh;
5103 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5104 myh->forced_local = 1;
5105
5106 s->size += ARM_BX_VENEER_SIZE;
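  /* The veneer size is a multiple of four, so the low bits of the recorded
     value are free for use as flags.  ORing in bit 1 also guarantees the
     entry is non-zero even for the first veneer (offset 0), which the
     "already allocated" check at the top of this function relies on.  */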
5107 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5108 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5109 }
5110
5111
5112 /* Add an entry to the code/data map for section SEC. TYPE is the mapping
     symbol class ('a' for ARM code, 't' for Thumb code, 'd' for data) and VMA
     is the section-relative address at which that state starts. */
5113
5114 static void
5115 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5116 {
5117 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5118 unsigned int newidx;
5119
5120 if (sec_data->map == NULL)
5121 {
5122 sec_data->map = (elf32_arm_section_map *)
5123 bfd_malloc (sizeof (elf32_arm_section_map));
5124 sec_data->mapcount = 0;
5125 sec_data->mapsize = 1;
5126 }
5127
5128 newidx = sec_data->mapcount++;
5129
5130 if (sec_data->mapcount > sec_data->mapsize)
5131 {
5132 sec_data->mapsize *= 2;
5133 sec_data->map = (elf32_arm_section_map *)
5134 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5135 * sizeof (elf32_arm_section_map));
5136 }
5137
5138 if (sec_data->map)
5139 {
5140 sec_data->map[newidx].vma = vma;
5141 sec_data->map[newidx].type = type;
5142 }
5143 }
5144
5145
5146 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5147 veneers are handled for now. */
5148
5149 static bfd_vma
5150 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5151 elf32_vfp11_erratum_list *branch,
5152 bfd *branch_bfd,
5153 asection *branch_sec,
5154 unsigned int offset)
5155 {
5156 asection *s;
5157 struct elf32_arm_link_hash_table *hash_table;
5158 char *tmp_name;
5159 struct elf_link_hash_entry *myh;
5160 struct bfd_link_hash_entry *bh;
5161 bfd_vma val;
5162 struct _arm_elf_section_data *sec_data;
5163 int errcount;
5164 elf32_vfp11_erratum_list *newerr;
5165
5166 hash_table = elf32_arm_hash_table (link_info);
5167
5168 BFD_ASSERT (hash_table != NULL);
5169 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5170
5171 s = bfd_get_section_by_name
5172 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5173
5174 BFD_ASSERT (s != NULL);
5175
5176 sec_data = elf32_arm_section_data (s);
5177
5178 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5179 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5180
5181 BFD_ASSERT (tmp_name);
5182
5183 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5184 hash_table->num_vfp11_fixes);
5185
5186 myh = elf_link_hash_lookup
5187 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5188
5189 BFD_ASSERT (myh == NULL);
5190
5191 bh = NULL;
5192 val = hash_table->vfp11_erratum_glue_size;
5193 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5194 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5195 NULL, TRUE, FALSE, &bh);
5196
5197 myh = (struct elf_link_hash_entry *) bh;
5198 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5199 myh->forced_local = 1;
5200
5201 /* Link veneer back to calling location. */
5202 errcount = ++(sec_data->erratumcount);
5203 newerr = (elf32_vfp11_erratum_list *)
5204 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5205
5206 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5207 newerr->vma = -1;
5208 newerr->u.v.branch = branch;
5209 newerr->u.v.id = hash_table->num_vfp11_fixes;
5210 branch->u.b.veneer = newerr;
5211
5212 newerr->next = sec_data->erratumlist;
5213 sec_data->erratumlist = newerr;
5214
5215 /* A symbol for the return from the veneer. */
5216 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5217 hash_table->num_vfp11_fixes);
5218
5219 myh = elf_link_hash_lookup
5220 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5221
5222 if (myh != NULL)
5223 abort ();
5224
5225 bh = NULL;
5226 val = offset + 4;
5227 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5228 branch_sec, val, NULL, TRUE, FALSE, &bh);
5229
5230 myh = (struct elf_link_hash_entry *) bh;
5231 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5232 myh->forced_local = 1;
5233
5234 free (tmp_name);
5235
5236 /* Generate a mapping symbol for the veneer section, and explicitly add an
5237 entry for that symbol to the code/data map for the section. */
5238 if (hash_table->vfp11_erratum_glue_size == 0)
5239 {
5240 bh = NULL;
5241 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5242 ever requires this erratum fix. */
5243 _bfd_generic_link_add_one_symbol (link_info,
5244 hash_table->bfd_of_glue_owner, "$a",
5245 BSF_LOCAL, s, 0, NULL,
5246 TRUE, FALSE, &bh);
5247
5248 myh = (struct elf_link_hash_entry *) bh;
5249 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5250 myh->forced_local = 1;
5251
5252 /* The elf32_arm_init_maps function only cares about symbols from input
5253 BFDs. We must make a note of this generated mapping symbol
5254 ourselves so that code byteswapping works properly in
5255 elf32_arm_write_section. */
5256 elf32_arm_section_map_add (s, 'a', 0);
5257 }
5258
5259 s->size += VFP11_ERRATUM_VENEER_SIZE;
5260 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5261 hash_table->num_vfp11_fixes++;
5262
5263 /* The offset of the veneer. */
5264 return val;
5265 }
5266
5267 #define ARM_GLUE_SECTION_FLAGS \
5268 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5269 | SEC_READONLY | SEC_LINKER_CREATED)
5270
5271 /* Create a fake section for use by the ARM backend of the linker. */
5272
5273 static bfd_boolean
5274 arm_make_glue_section (bfd * abfd, const char * name)
5275 {
5276 asection * sec;
5277
5278 sec = bfd_get_section_by_name (abfd, name);
5279 if (sec != NULL)
5280 /* Already made. */
5281 return TRUE;
5282
5283 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5284
5285 if (sec == NULL
5286 || !bfd_set_section_alignment (abfd, sec, 2))
5287 return FALSE;
5288
5289 /* Set the gc mark to prevent the section from being removed by garbage
5290 collection, despite the fact that no relocs refer to this section. */
5291 sec->gc_mark = 1;
5292
5293 return TRUE;
5294 }
5295
5296 /* Add the glue sections to ABFD. This function is called from the
5297 linker scripts in ld/emultempl/{armelf}.em. */
5298
5299 bfd_boolean
5300 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5301 struct bfd_link_info *info)
5302 {
5303 /* If we are only performing a partial
5304 link do not bother adding the glue. */
5305 if (info->relocatable)
5306 return TRUE;
5307
5308 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5309 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5310 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5311 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5312 }
5313
5314 /* Select a BFD to be used to hold the sections used by the glue code.
5315 This function is called from the linker scripts in ld/emultempl/
5316 {armelf/pe}.em. */
5317
5318 bfd_boolean
5319 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5320 {
5321 struct elf32_arm_link_hash_table *globals;
5322
5323 /* If we are only performing a partial link
5324 do not bother getting a bfd to hold the glue. */
5325 if (info->relocatable)
5326 return TRUE;
5327
5328 /* Make sure we don't attach the glue sections to a dynamic object. */
5329 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5330
5331 globals = elf32_arm_hash_table (info);
5332
5333 BFD_ASSERT (globals != NULL);
5334
5335 if (globals->bfd_of_glue_owner != NULL)
5336 return TRUE;
5337
5338 /* Save the bfd for later use. */
5339 globals->bfd_of_glue_owner = abfd;
5340
5341 return TRUE;
5342 }
5343
5344 static void
5345 check_use_blx (struct elf32_arm_link_hash_table *globals)
5346 {
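  /* Tag_CPU_arch values 0, 1 and 2 denote pre-ARMv4, ARMv4 and ARMv4T
     respectively; anything greater is ARMv5T or later, which provides the
     BLX instruction.  */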
5347 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5348 Tag_CPU_arch) > 2)
5349 globals->use_blx = 1;
5350 }
5351
5352 bfd_boolean
5353 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5354 struct bfd_link_info *link_info)
5355 {
5356 Elf_Internal_Shdr *symtab_hdr;
5357 Elf_Internal_Rela *internal_relocs = NULL;
5358 Elf_Internal_Rela *irel, *irelend;
5359 bfd_byte *contents = NULL;
5360
5361 asection *sec;
5362 struct elf32_arm_link_hash_table *globals;
5363
5364 /* If we are only performing a partial link do not bother
5365 to construct any glue. */
5366 if (link_info->relocatable)
5367 return TRUE;
5368
5369 /* Here we have a bfd that is to be included in the link. We have a
5370 hook to do reloc rummaging, before section sizes are nailed down. */
5371 globals = elf32_arm_hash_table (link_info);
5372
5373 BFD_ASSERT (globals != NULL);
5374
5375 check_use_blx (globals);
5376
5377 if (globals->byteswap_code && !bfd_big_endian (abfd))
5378 {
5379 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5380 abfd);
5381 return FALSE;
5382 }
5383
5384 /* PR 5398: If we have not decided to include any loadable sections in
5385 the output then we will not have a glue owner bfd. This is OK, it
5386 just means that there is nothing else for us to do here. */
5387 if (globals->bfd_of_glue_owner == NULL)
5388 return TRUE;
5389
5390 /* Rummage around all the relocs and map the glue vectors. */
5391 sec = abfd->sections;
5392
5393 if (sec == NULL)
5394 return TRUE;
5395
5396 for (; sec != NULL; sec = sec->next)
5397 {
5398 if (sec->reloc_count == 0)
5399 continue;
5400
5401 if ((sec->flags & SEC_EXCLUDE) != 0)
5402 continue;
5403
5404 symtab_hdr = & elf_symtab_hdr (abfd);
5405
5406 /* Load the relocs. */
5407 internal_relocs
5408 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5409
5410 if (internal_relocs == NULL)
5411 goto error_return;
5412
5413 irelend = internal_relocs + sec->reloc_count;
5414 for (irel = internal_relocs; irel < irelend; irel++)
5415 {
5416 long r_type;
5417 unsigned long r_index;
5418
5419 struct elf_link_hash_entry *h;
5420
5421 r_type = ELF32_R_TYPE (irel->r_info);
5422 r_index = ELF32_R_SYM (irel->r_info);
5423
5424 /* These are the only relocation types we care about. */
5425 if ( r_type != R_ARM_PC24
5426 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5427 continue;
5428
5429 /* Get the section contents if we haven't done so already. */
5430 if (contents == NULL)
5431 {
5432 /* Get cached copy if it exists. */
5433 if (elf_section_data (sec)->this_hdr.contents != NULL)
5434 contents = elf_section_data (sec)->this_hdr.contents;
5435 else
5436 {
5437 /* Go get them off disk. */
5438 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5439 goto error_return;
5440 }
5441 }
5442
5443 if (r_type == R_ARM_V4BX)
5444 {
5445 int reg;
5446
5447 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5448 record_arm_bx_glue (link_info, reg);
5449 continue;
5450 }
5451
5452 /* If the relocation is not against a symbol it cannot concern us. */
5453 h = NULL;
5454
5455 /* We don't care about local symbols. */
5456 if (r_index < symtab_hdr->sh_info)
5457 continue;
5458
5459 /* This is an external symbol. */
5460 r_index -= symtab_hdr->sh_info;
5461 h = (struct elf_link_hash_entry *)
5462 elf_sym_hashes (abfd)[r_index];
5463
5464 /* If the relocation is against a static symbol it must be within
5465 the current section and so cannot be a cross ARM/Thumb relocation. */
5466 if (h == NULL)
5467 continue;
5468
5469 /* If the call will go through a PLT entry then we do not need
5470 glue. */
5471 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5472 continue;
5473
5474 switch (r_type)
5475 {
5476 case R_ARM_PC24:
5477 /* This one is a call from arm code. We need to look up
5478 the target of the call. If it is a thumb target, we
5479 insert glue. */
5480 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5481 record_arm_to_thumb_glue (link_info, h);
5482 break;
5483
5484 default:
5485 abort ();
5486 }
5487 }
5488
5489 if (contents != NULL
5490 && elf_section_data (sec)->this_hdr.contents != contents)
5491 free (contents);
5492 contents = NULL;
5493
5494 if (internal_relocs != NULL
5495 && elf_section_data (sec)->relocs != internal_relocs)
5496 free (internal_relocs);
5497 internal_relocs = NULL;
5498 }
5499
5500 return TRUE;
5501
5502 error_return:
5503 if (contents != NULL
5504 && elf_section_data (sec)->this_hdr.contents != contents)
5505 free (contents);
5506 if (internal_relocs != NULL
5507 && elf_section_data (sec)->relocs != internal_relocs)
5508 free (internal_relocs);
5509
5510 return FALSE;
5511 }
5512 #endif
5513
5514
5515 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5516
5517 void
5518 bfd_elf32_arm_init_maps (bfd *abfd)
5519 {
5520 Elf_Internal_Sym *isymbuf;
5521 Elf_Internal_Shdr *hdr;
5522 unsigned int i, localsyms;
5523
5524 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5525 if (! is_arm_elf (abfd))
5526 return;
5527
5528 if ((abfd->flags & DYNAMIC) != 0)
5529 return;
5530
5531 hdr = & elf_symtab_hdr (abfd);
5532 localsyms = hdr->sh_info;
5533
5534 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5535 should contain the number of local symbols, which should come before any
5536 global symbols. Mapping symbols are always local. */
5537 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5538 NULL);
5539
5540 /* No internal symbols read? Skip this BFD. */
5541 if (isymbuf == NULL)
5542 return;
5543
5544 for (i = 0; i < localsyms; i++)
5545 {
5546 Elf_Internal_Sym *isym = &isymbuf[i];
5547 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5548 const char *name;
5549
5550 if (sec != NULL
5551 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5552 {
5553 name = bfd_elf_string_from_elf_section (abfd,
5554 hdr->sh_link, isym->st_name);
5555
5556 if (bfd_is_arm_special_symbol_name (name,
5557 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5558 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5559 }
5560 }
5561 }
5562
5563
5564 /* Auto-select enabling of the Cortex-A8 erratum fix if the user didn't explicitly
5565 say what they wanted. */
5566
5567 void
5568 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5569 {
5570 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5571 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5572
5573 if (globals->fix_cortex_a8 == -1)
5574 {
5575 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5576 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5577 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5578 || out_attr[Tag_CPU_arch_profile].i == 0))
5579 globals->fix_cortex_a8 = 1;
5580 else
5581 globals->fix_cortex_a8 = 0;
5582 }
5583 }
5584
5585
5586 void
5587 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5588 {
5589 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5590 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5591
5592 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5593 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5594 {
5595 switch (globals->vfp11_fix)
5596 {
5597 case BFD_ARM_VFP11_FIX_DEFAULT:
5598 case BFD_ARM_VFP11_FIX_NONE:
5599 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5600 break;
5601
5602 default:
5603 /* Give a warning, but do as the user requests anyway. */
5604 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5605 "workaround is not necessary for target architecture"), obfd);
5606 }
5607 }
5608 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5609 /* For earlier architectures, we might need the workaround, but do not
5610 enable it by default. If the user is running with broken hardware, they
5611 must enable the erratum fix explicitly. */
5612 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5613 }
5614
5615
5616 enum bfd_arm_vfp11_pipe
5617 {
5618 VFP11_FMAC,
5619 VFP11_LS,
5620 VFP11_DS,
5621 VFP11_BAD
5622 };
5623
5624 /* Return a VFP register number. This is encoded as RX:X for single-precision
5625 registers, or X:RX for double-precision registers, where RX is the group of
5626 four bits in the instruction encoding and X is the single extension bit.
5627 RX and X fields are specified using their lowest (starting) bit. The return
5628 value is:
5629
5630 0...31: single-precision registers s0...s31
5631 32...63: double-precision registers d0...d31.
5632
5633 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5634 encounter VFP3 instructions, so we allow the full range for DP registers. */
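/* For example, with IS_DOUBLE false, RX bits of 0b1010 and an X bit of 1
   decode to (0b1010 << 1) | 1 = 21, i.e. s21; with IS_DOUBLE true the same
   fields decode to (10 | (1 << 4)) + 32 = 58, i.e. d26.  */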
5635
5636 static unsigned int
5637 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5638 unsigned int x)
5639 {
5640 if (is_double)
5641 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5642 else
5643 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5644 }
5645
5646 /* Set bits in *WMASK according to a register number REG as encoded by
5647 bfd_arm_vfp11_regno(). Ignore d16-d31. */
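/* For example, s5 sets bit 5 of the mask, while d3 (register number 35 as
   returned by bfd_arm_vfp11_regno) sets bits 6 and 7, i.e. the two SP halves
   s6 and s7 that d3 overlays.  */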
5648
5649 static void
5650 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5651 {
5652 if (reg < 32)
5653 *wmask |= 1 << reg;
5654 else if (reg < 48)
5655 *wmask |= 3 << ((reg - 32) * 2);
5656 }
5657
5658 /* Return TRUE if WMASK overwrites anything in REGS. */
5659
5660 static bfd_boolean
5661 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5662 {
5663 int i;
5664
5665 for (i = 0; i < numregs; i++)
5666 {
5667 unsigned int reg = regs[i];
5668
5669 if (reg < 32 && (wmask & (1 << reg)) != 0)
5670 return TRUE;
5671
5672 reg -= 32;
5673
5674 if (reg >= 16)
5675 continue;
5676
5677 if ((wmask & (3 << (reg * 2))) != 0)
5678 return TRUE;
5679 }
5680
5681 return FALSE;
5682 }
5683
5684 /* In this function, we're interested in two things: finding input registers
5685 for VFP data-processing instructions, and finding the set of registers which
5686 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5687 hold the written set, so FLDM etc. are easy to deal with (we're only
5688 interested in 32 SP registers or 16 DP registers, due to the VFP version
5689 implemented by the chip in question). DP registers are marked by setting
5690 both of the corresponding SP registers in the write mask. */
5691
5692 static enum bfd_arm_vfp11_pipe
5693 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5694 int *numregs)
5695 {
5696 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5697 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5698
5699 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5700 {
5701 unsigned int pqrs;
5702 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5703 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5704
5705 pqrs = ((insn & 0x00800000) >> 20)
5706 | ((insn & 0x00300000) >> 19)
5707 | ((insn & 0x00000040) >> 6);
5708
5709 switch (pqrs)
5710 {
5711 case 0: /* fmac[sd]. */
5712 case 1: /* fnmac[sd]. */
5713 case 2: /* fmsc[sd]. */
5714 case 3: /* fnmsc[sd]. */
5715 pipe = VFP11_FMAC;
5716 bfd_arm_vfp11_write_mask (destmask, fd);
5717 regs[0] = fd;
5718 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5719 regs[2] = fm;
5720 *numregs = 3;
5721 break;
5722
5723 case 4: /* fmul[sd]. */
5724 case 5: /* fnmul[sd]. */
5725 case 6: /* fadd[sd]. */
5726 case 7: /* fsub[sd]. */
5727 pipe = VFP11_FMAC;
5728 goto vfp_binop;
5729
5730 case 8: /* fdiv[sd]. */
5731 pipe = VFP11_DS;
5732 vfp_binop:
5733 bfd_arm_vfp11_write_mask (destmask, fd);
5734 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5735 regs[1] = fm;
5736 *numregs = 2;
5737 break;
5738
5739 case 15: /* extended opcode. */
5740 {
5741 unsigned int extn = ((insn >> 15) & 0x1e)
5742 | ((insn >> 7) & 1);
5743
5744 switch (extn)
5745 {
5746 case 0: /* fcpy[sd]. */
5747 case 1: /* fabs[sd]. */
5748 case 2: /* fneg[sd]. */
5749 case 8: /* fcmp[sd]. */
5750 case 9: /* fcmpe[sd]. */
5751 case 10: /* fcmpz[sd]. */
5752 case 11: /* fcmpez[sd]. */
5753 case 16: /* fuito[sd]. */
5754 case 17: /* fsito[sd]. */
5755 case 24: /* ftoui[sd]. */
5756 case 25: /* ftouiz[sd]. */
5757 case 26: /* ftosi[sd]. */
5758 case 27: /* ftosiz[sd]. */
5759 /* These instructions will not bounce due to underflow. */
5760 *numregs = 0;
5761 pipe = VFP11_FMAC;
5762 break;
5763
5764 case 3: /* fsqrt[sd]. */
5765 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5766 registers to cause the erratum in previous instructions. */
5767 bfd_arm_vfp11_write_mask (destmask, fd);
5768 pipe = VFP11_DS;
5769 break;
5770
5771 case 15: /* fcvt{ds,sd}. */
5772 {
5773 int rnum = 0;
5774
5775 bfd_arm_vfp11_write_mask (destmask, fd);
5776
5777 /* Only FCVTSD can underflow. */
5778 if ((insn & 0x100) != 0)
5779 regs[rnum++] = fm;
5780
5781 *numregs = rnum;
5782
5783 pipe = VFP11_FMAC;
5784 }
5785 break;
5786
5787 default:
5788 return VFP11_BAD;
5789 }
5790 }
5791 break;
5792
5793 default:
5794 return VFP11_BAD;
5795 }
5796 }
5797 /* Two-register transfer. */
5798 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5799 {
5800 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5801
5802 if ((insn & 0x100000) == 0)
5803 {
5804 if (is_double)
5805 bfd_arm_vfp11_write_mask (destmask, fm);
5806 else
5807 {
5808 bfd_arm_vfp11_write_mask (destmask, fm);
5809 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5810 }
5811 }
5812
5813 pipe = VFP11_LS;
5814 }
5815 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5816 {
5817 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5818 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5819
5820 switch (puw)
5821 {
5822 case 0: /* Two-reg transfer. We should catch these above. */
5823 abort ();
5824
5825 case 2: /* fldm[sdx]. */
5826 case 3:
5827 case 5:
5828 {
5829 unsigned int i, offset = insn & 0xff;
5830
5831 if (is_double)
5832 offset >>= 1;
5833
5834 for (i = fd; i < fd + offset; i++)
5835 bfd_arm_vfp11_write_mask (destmask, i);
5836 }
5837 break;
5838
5839 case 4: /* fld[sd]. */
5840 case 6:
5841 bfd_arm_vfp11_write_mask (destmask, fd);
5842 break;
5843
5844 default:
5845 return VFP11_BAD;
5846 }
5847
5848 pipe = VFP11_LS;
5849 }
5850 /* Single-register transfer. Note L==0. */
5851 else if ((insn & 0x0f100e10) == 0x0e000a10)
5852 {
5853 unsigned int opcode = (insn >> 21) & 7;
5854 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5855
5856 switch (opcode)
5857 {
5858 case 0: /* fmsr/fmdlr. */
5859 case 1: /* fmdhr. */
5860 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5861 destination register. I don't know if this is exactly right,
5862 but it is the conservative choice. */
5863 bfd_arm_vfp11_write_mask (destmask, fn);
5864 break;
5865
5866 case 7: /* fmxr. */
5867 break;
5868 }
5869
5870 pipe = VFP11_LS;
5871 }
5872
5873 return pipe;
5874 }
5875
5876
5877 static int elf32_arm_compare_mapping (const void * a, const void * b);
5878
5879
5880 /* Look for potentially-troublesome code sequences which might trigger the
5881 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5882 (available from ARM) for details of the erratum. A short version is
5883 described in ld.texinfo. */
5884
5885 bfd_boolean
5886 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5887 {
5888 asection *sec;
5889 bfd_byte *contents = NULL;
5890 int state = 0;
5891 int regs[3], numregs = 0;
5892 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5893 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5894
5895 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5896 The states transition as follows:
5897
5898 0 -> 1 (vector) or 0 -> 2 (scalar)
5899 A VFP FMAC-pipeline instruction has been seen. Fill
5900 regs[0]..regs[numregs-1] with its input operands. Remember this
5901 instruction in 'first_fmac'.
5902
5903 1 -> 2
5904 Any instruction, except for a VFP instruction which overwrites
5905 regs[*].
5906
5907 1 -> 3 [ -> 0 ] or
5908 2 -> 3 [ -> 0 ]
5909 A VFP instruction has been seen which overwrites any of regs[*].
5910 We must make a veneer! Reset state to 0 before examining next
5911 instruction.
5912
5913 2 -> 0
5914 If we fail to match anything in state 2, reset to state 0 and reset
5915 the instruction pointer to the instruction after 'first_fmac'.
5916
5917 If the VFP11 vector mode is in use, there must be at least two unrelated
5918 instructions between anti-dependent VFP11 instructions to properly avoid
5919 triggering the erratum, hence the use of the extra state 1. */
5920
5921 /* If we are only performing a partial link do not bother
5922 to construct any glue. */
5923 if (link_info->relocatable)
5924 return TRUE;
5925
5926 /* Skip if this bfd does not correspond to an ELF image. */
5927 if (! is_arm_elf (abfd))
5928 return TRUE;
5929
5930 /* We should have chosen a fix type by the time we get here. */
5931 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5932
5933 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5934 return TRUE;
5935
5936 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5937 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5938 return TRUE;
5939
5940 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5941 {
5942 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5943 struct _arm_elf_section_data *sec_data;
5944
5945 /* If we don't have executable progbits, we're not interested in this
5946 section. Also skip if section is to be excluded. */
5947 if (elf_section_type (sec) != SHT_PROGBITS
5948 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5949 || (sec->flags & SEC_EXCLUDE) != 0
5950 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5951 || sec->output_section == bfd_abs_section_ptr
5952 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5953 continue;
5954
5955 sec_data = elf32_arm_section_data (sec);
5956
5957 if (sec_data->mapcount == 0)
5958 continue;
5959
5960 if (elf_section_data (sec)->this_hdr.contents != NULL)
5961 contents = elf_section_data (sec)->this_hdr.contents;
5962 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5963 goto error_return;
5964
5965 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5966 elf32_arm_compare_mapping);
5967
5968 for (span = 0; span < sec_data->mapcount; span++)
5969 {
5970 unsigned int span_start = sec_data->map[span].vma;
5971 unsigned int span_end = (span == sec_data->mapcount - 1)
5972 ? sec->size : sec_data->map[span + 1].vma;
5973 char span_type = sec_data->map[span].type;
5974
5975 /* FIXME: Only ARM mode is supported at present. We may need to
5976 support Thumb-2 mode also at some point. */
5977 if (span_type != 'a')
5978 continue;
5979
5980 for (i = span_start; i < span_end;)
5981 {
5982 unsigned int next_i = i + 4;
5983 unsigned int insn = bfd_big_endian (abfd)
5984 ? (contents[i] << 24)
5985 | (contents[i + 1] << 16)
5986 | (contents[i + 2] << 8)
5987 | contents[i + 3]
5988 : (contents[i + 3] << 24)
5989 | (contents[i + 2] << 16)
5990 | (contents[i + 1] << 8)
5991 | contents[i];
5992 unsigned int writemask = 0;
5993 enum bfd_arm_vfp11_pipe pipe;
5994
5995 switch (state)
5996 {
5997 case 0:
5998 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5999 &numregs);
6000 /* I'm assuming the VFP11 erratum can trigger with denorm
6001 operands on either the FMAC or the DS pipeline. This might
6002 lead to slightly overenthusiastic veneer insertion. */
6003 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
6004 {
6005 state = use_vector ? 1 : 2;
6006 first_fmac = i;
6007 veneer_of_insn = insn;
6008 }
6009 break;
6010
6011 case 1:
6012 {
6013 int other_regs[3], other_numregs;
6014 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6015 other_regs,
6016 &other_numregs);
6017 if (pipe != VFP11_BAD
6018 && bfd_arm_vfp11_antidependency (writemask, regs,
6019 numregs))
6020 state = 3;
6021 else
6022 state = 2;
6023 }
6024 break;
6025
6026 case 2:
6027 {
6028 int other_regs[3], other_numregs;
6029 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6030 other_regs,
6031 &other_numregs);
6032 if (pipe != VFP11_BAD
6033 && bfd_arm_vfp11_antidependency (writemask, regs,
6034 numregs))
6035 state = 3;
6036 else
6037 {
6038 state = 0;
6039 next_i = first_fmac + 4;
6040 }
6041 }
6042 break;
6043
6044 case 3:
6045 abort (); /* Should be unreachable. */
6046 }
6047
6048 if (state == 3)
6049 {
6050 elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6051 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6052 int errcount;
6053
6054 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6055
6056 newerr->u.b.vfp_insn = veneer_of_insn;
6057
6058 switch (span_type)
6059 {
6060 case 'a':
6061 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6062 break;
6063
6064 default:
6065 abort ();
6066 }
6067
6068 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6069 first_fmac);
6070
6071 newerr->vma = -1;
6072
6073 newerr->next = sec_data->erratumlist;
6074 sec_data->erratumlist = newerr;
6075
6076 state = 0;
6077 }
6078
6079 i = next_i;
6080 }
6081 }
6082
6083 if (contents != NULL
6084 && elf_section_data (sec)->this_hdr.contents != contents)
6085 free (contents);
6086 contents = NULL;
6087 }
6088
6089 return TRUE;
6090
6091 error_return:
6092 if (contents != NULL
6093 && elf_section_data (sec)->this_hdr.contents != contents)
6094 free (contents);
6095
6096 return FALSE;
6097 }
6098
6099 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6100 after sections have been laid out, using specially-named symbols. */
6101
6102 void
6103 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6104 struct bfd_link_info *link_info)
6105 {
6106 asection *sec;
6107 struct elf32_arm_link_hash_table *globals;
6108 char *tmp_name;
6109
6110 if (link_info->relocatable)
6111 return;
6112
6113 /* Skip if this bfd does not correspond to an ELF image. */
6114 if (! is_arm_elf (abfd))
6115 return;
6116
6117 globals = elf32_arm_hash_table (link_info);
6118
6119 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6120 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6121
6122 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6123 {
6124 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6125 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6126
6127 for (; errnode != NULL; errnode = errnode->next)
6128 {
6129 struct elf_link_hash_entry *myh;
6130 bfd_vma vma;
6131
6132 switch (errnode->type)
6133 {
6134 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6135 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6136 /* Find veneer symbol. */
6137 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6138 errnode->u.b.veneer->u.v.id);
6139
6140 myh = elf_link_hash_lookup
6141 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6142
6143 if (myh == NULL)
6144 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6145 "`%s'"), abfd, tmp_name);
6146
6147 vma = myh->root.u.def.section->output_section->vma
6148 + myh->root.u.def.section->output_offset
6149 + myh->root.u.def.value;
6150
6151 errnode->u.b.veneer->vma = vma;
6152 break;
6153
6154 case VFP11_ERRATUM_ARM_VENEER:
6155 case VFP11_ERRATUM_THUMB_VENEER:
6156 /* Find return location. */
6157 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6158 errnode->u.v.id);
6159
6160 myh = elf_link_hash_lookup
6161 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6162
6163 if (myh == NULL)
6164 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6165 "`%s'"), abfd, tmp_name);
6166
6167 vma = myh->root.u.def.section->output_section->vma
6168 + myh->root.u.def.section->output_offset
6169 + myh->root.u.def.value;
6170
6171 errnode->u.v.branch->vma = vma;
6172 break;
6173
6174 default:
6175 abort ();
6176 }
6177 }
6178 }
6179
6180 free (tmp_name);
6181 }
6182
6183
6184 /* Set target relocation values needed during linking. */
6185
6186 void
6187 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6188 struct bfd_link_info *link_info,
6189 int target1_is_rel,
6190 char * target2_type,
6191 int fix_v4bx,
6192 int use_blx,
6193 bfd_arm_vfp11_fix vfp11_fix,
6194 int no_enum_warn, int no_wchar_warn,
6195 int pic_veneer, int fix_cortex_a8)
6196 {
6197 struct elf32_arm_link_hash_table *globals;
6198
6199 globals = elf32_arm_hash_table (link_info);
6200
6201 globals->target1_is_rel = target1_is_rel;
6202 if (strcmp (target2_type, "rel") == 0)
6203 globals->target2_reloc = R_ARM_REL32;
6204 else if (strcmp (target2_type, "abs") == 0)
6205 globals->target2_reloc = R_ARM_ABS32;
6206 else if (strcmp (target2_type, "got-rel") == 0)
6207 globals->target2_reloc = R_ARM_GOT_PREL;
6208 else
6209 {
6210 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6211 target2_type);
6212 }
6213 globals->fix_v4bx = fix_v4bx;
6214 globals->use_blx |= use_blx;
6215 globals->vfp11_fix = vfp11_fix;
6216 globals->pic_veneer = pic_veneer;
6217 globals->fix_cortex_a8 = fix_cortex_a8;
6218
6219 BFD_ASSERT (is_arm_elf (output_bfd));
6220 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6221 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6222 }
6223
6224 /* Replace the target offset of a Thumb bl or b.w instruction. */
6225
6226 static void
6227 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6228 {
6229 bfd_vma upper;
6230 bfd_vma lower;
6231 int reloc_sign;
6232
6233 BFD_ASSERT ((offset & 1) == 0);
6234
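  /* The branch offset is encoded as S:I1:I2:imm10:imm11 (bit 0 of the offset
     is implicitly zero): S and imm10 live in the first halfword, while the
     second halfword holds J1 = NOT(I1) EOR S, J2 = NOT(I2) EOR S and imm11.  */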
6235 upper = bfd_get_16 (abfd, insn);
6236 lower = bfd_get_16 (abfd, insn + 2);
6237 reloc_sign = (offset < 0) ? 1 : 0;
6238 upper = (upper & ~(bfd_vma) 0x7ff)
6239 | ((offset >> 12) & 0x3ff)
6240 | (reloc_sign << 10);
6241 lower = (lower & ~(bfd_vma) 0x2fff)
6242 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6243 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6244 | ((offset >> 1) & 0x7ff);
6245 bfd_put_16 (abfd, upper, insn);
6246 bfd_put_16 (abfd, lower, insn + 2);
6247 }
6248
6249 /* Thumb code calling an ARM function. */
6250
6251 static int
6252 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6253 const char * name,
6254 bfd * input_bfd,
6255 bfd * output_bfd,
6256 asection * input_section,
6257 bfd_byte * hit_data,
6258 asection * sym_sec,
6259 bfd_vma offset,
6260 bfd_signed_vma addend,
6261 bfd_vma val,
6262 char **error_message)
6263 {
6264 asection * s = 0;
6265 bfd_vma my_offset;
6266 long int ret_offset;
6267 struct elf_link_hash_entry * myh;
6268 struct elf32_arm_link_hash_table * globals;
6269
6270 myh = find_thumb_glue (info, name, error_message);
6271 if (myh == NULL)
6272 return FALSE;
6273
6274 globals = elf32_arm_hash_table (info);
6275
6276 BFD_ASSERT (globals != NULL);
6277 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6278
6279 my_offset = myh->root.u.def.value;
6280
6281 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6282 THUMB2ARM_GLUE_SECTION_NAME);
6283
6284 BFD_ASSERT (s != NULL);
6285 BFD_ASSERT (s->contents != NULL);
6286 BFD_ASSERT (s->output_section != NULL);
6287
6288 if ((my_offset & 0x01) == 0x01)
6289 {
6290 if (sym_sec != NULL
6291 && sym_sec->owner != NULL
6292 && !INTERWORK_FLAG (sym_sec->owner))
6293 {
6294 (*_bfd_error_handler)
6295 (_("%B(%s): warning: interworking not enabled.\n"
6296 " first occurrence: %B: thumb call to arm"),
6297 sym_sec->owner, input_bfd, name);
6298
6299 return FALSE;
6300 }
6301
6302 --my_offset;
6303 myh->root.u.def.value = my_offset;
6304
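      /* The stub is laid out as two Thumb instructions (bx pc to switch to
         ARM state, then a nop for padding) followed at offset 4 by an ARM
         branch to the real destination, written below.  */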
6305 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6306 s->contents + my_offset);
6307
6308 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6309 s->contents + my_offset + 2);
6310
6311 ret_offset =
6312 /* Address of destination of the stub. */
6313 ((bfd_signed_vma) val)
6314 - ((bfd_signed_vma)
6315 /* Offset from the start of the current section
6316 to the start of the stubs. */
6317 (s->output_offset
6318 /* Offset of the start of this stub from the start of the stubs. */
6319 + my_offset
6320 /* Address of the start of the current section. */
6321 + s->output_section->vma)
6322 /* The branch instruction is 4 bytes into the stub. */
6323 + 4
6324 /* ARM branches work from the pc of the instruction + 8. */
6325 + 8);
6326
6327 put_arm_insn (globals, output_bfd,
6328 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6329 s->contents + my_offset + 4);
6330 }
6331
6332 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6333
6334 /* Now go back and fix up the original BL insn to point to here. */
6335 ret_offset =
6336 /* Address of where the stub is located. */
6337 (s->output_section->vma + s->output_offset + my_offset)
6338 /* Address of where the BL is located. */
6339 - (input_section->output_section->vma + input_section->output_offset
6340 + offset)
6341 /* Addend in the relocation. */
6342 - addend
6343 /* Biasing for PC-relative addressing. */
6344 - 8;
6345
6346 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6347
6348 return TRUE;
6349 }
6350
6351 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6352
6353 static struct elf_link_hash_entry *
6354 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6355 const char * name,
6356 bfd * input_bfd,
6357 bfd * output_bfd,
6358 asection * sym_sec,
6359 bfd_vma val,
6360 asection * s,
6361 char ** error_message)
6362 {
6363 bfd_vma my_offset;
6364 long int ret_offset;
6365 struct elf_link_hash_entry * myh;
6366 struct elf32_arm_link_hash_table * globals;
6367
6368 myh = find_arm_glue (info, name, error_message);
6369 if (myh == NULL)
6370 return NULL;
6371
6372 globals = elf32_arm_hash_table (info);
6373
6374 BFD_ASSERT (globals != NULL);
6375 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6376
6377 my_offset = myh->root.u.def.value;
6378
6379 if ((my_offset & 0x01) == 0x01)
6380 {
6381 if (sym_sec != NULL
6382 && sym_sec->owner != NULL
6383 && !INTERWORK_FLAG (sym_sec->owner))
6384 {
6385 (*_bfd_error_handler)
6386 (_("%B(%s): warning: interworking not enabled.\n"
6387 " first occurrence: %B: arm call to thumb"),
6388 sym_sec->owner, input_bfd, name);
6389 }
6390
6391 --my_offset;
6392 myh->root.u.def.value = my_offset;
6393
6394 if (info->shared || globals->root.is_relocatable_executable
6395 || globals->pic_veneer)
6396 {
6397 /* For relocatable objects we can't use absolute addresses,
6398 so construct the address from a relative offset. */
6399 /* TODO: If the offset is small it's probably worth
6400 constructing the address with adds. */
6401 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6402 s->contents + my_offset);
6403 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6404 s->contents + my_offset + 4);
6405 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6406 s->contents + my_offset + 8);
6407 /* Adjust the offset by 4 for the position of the add,
6408 and 8 for the pipeline offset. */
6409 ret_offset = (val - (s->output_offset
6410 + s->output_section->vma
6411 + my_offset + 12))
6412 | 1;
6413 bfd_put_32 (output_bfd, ret_offset,
6414 s->contents + my_offset + 12);
6415 }
6416 else if (globals->use_blx)
6417 {
6418 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6419 s->contents + my_offset);
6420
6421 /* It's a thumb address. Add the low order bit. */
6422 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6423 s->contents + my_offset + 4);
6424 }
6425 else
6426 {
6427 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6428 s->contents + my_offset);
6429
6430 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6431 s->contents + my_offset + 4);
6432
6433 /* It's a thumb address. Add the low order bit. */
6434 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6435 s->contents + my_offset + 8);
6436
6437 my_offset += 12;
6438 }
6439 }
6440
6441 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6442
6443 return myh;
6444 }
6445
6446 /* Arm code calling a Thumb function. */
6447
6448 static int
6449 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6450 const char * name,
6451 bfd * input_bfd,
6452 bfd * output_bfd,
6453 asection * input_section,
6454 bfd_byte * hit_data,
6455 asection * sym_sec,
6456 bfd_vma offset,
6457 bfd_signed_vma addend,
6458 bfd_vma val,
6459 char **error_message)
6460 {
6461 unsigned long int tmp;
6462 bfd_vma my_offset;
6463 asection * s;
6464 long int ret_offset;
6465 struct elf_link_hash_entry * myh;
6466 struct elf32_arm_link_hash_table * globals;
6467
6468 globals = elf32_arm_hash_table (info);
6469
6470 BFD_ASSERT (globals != NULL);
6471 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6472
6473 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6474 ARM2THUMB_GLUE_SECTION_NAME);
6475 BFD_ASSERT (s != NULL);
6476 BFD_ASSERT (s->contents != NULL);
6477 BFD_ASSERT (s->output_section != NULL);
6478
6479 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6480 sym_sec, val, s, error_message);
6481 if (!myh)
6482 return FALSE;
6483
6484 my_offset = myh->root.u.def.value;
6485 tmp = bfd_get_32 (input_bfd, hit_data);
6486 tmp = tmp & 0xFF000000;
6487
6488 /* Somehow these are both 4 too far, so subtract 8. */
6489 ret_offset = (s->output_offset
6490 + my_offset
6491 + s->output_section->vma
6492 - (input_section->output_offset
6493 + input_section->output_section->vma
6494 + offset + addend)
6495 - 8);
6496
6497 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6498
6499 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6500
6501 return TRUE;
6502 }
6503
6504 /* Populate Arm stub for an exported Thumb function. */
6505
6506 static bfd_boolean
6507 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6508 {
6509 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6510 asection * s;
6511 struct elf_link_hash_entry * myh;
6512 struct elf32_arm_link_hash_entry *eh;
6513 struct elf32_arm_link_hash_table * globals;
6514 asection *sec;
6515 bfd_vma val;
6516 char *error_message;
6517
6518 eh = elf32_arm_hash_entry (h);
6519 /* Allocate stubs for exported Thumb functions on v4t. */
6520 if (eh->export_glue == NULL)
6521 return TRUE;
6522
6523 globals = elf32_arm_hash_table (info);
6524
6525 BFD_ASSERT (globals != NULL);
6526 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6527
6528 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6529 ARM2THUMB_GLUE_SECTION_NAME);
6530 BFD_ASSERT (s != NULL);
6531 BFD_ASSERT (s->contents != NULL);
6532 BFD_ASSERT (s->output_section != NULL);
6533
6534 sec = eh->export_glue->root.u.def.section;
6535
6536 BFD_ASSERT (sec->output_section != NULL);
6537
6538 val = eh->export_glue->root.u.def.value + sec->output_offset
6539 + sec->output_section->vma;
6540
6541 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6542 h->root.u.def.section->owner,
6543 globals->obfd, sec, val, s,
6544 &error_message);
6545 BFD_ASSERT (myh);
6546 return TRUE;
6547 }
6548
6549 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6550
6551 static bfd_vma
6552 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6553 {
6554 bfd_byte *p;
6555 bfd_vma glue_addr;
6556 asection *s;
6557 struct elf32_arm_link_hash_table *globals;
6558
6559 globals = elf32_arm_hash_table (info);
6560
6561 BFD_ASSERT (globals != NULL);
6562 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6563
6564 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6565 ARM_BX_GLUE_SECTION_NAME);
6566 BFD_ASSERT (s != NULL);
6567 BFD_ASSERT (s->contents != NULL);
6568 BFD_ASSERT (s->output_section != NULL);
6569
6570 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6571
6572 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6573
6574 if ((globals->bx_glue_offset[reg] & 1) == 0)
6575 {
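      /* Write the veneer: a tst of bit zero of the target register, a
         moveq pc for an ARM-state destination, then a bx for a Thumb-state
         destination.  */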
6576 p = s->contents + glue_addr;
6577 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6578 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6579 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6580 globals->bx_glue_offset[reg] |= 1;
6581 }
6582
6583 return glue_addr + s->output_section->vma + s->output_offset;
6584 }
6585
6586 /* Generate Arm stubs for exported Thumb symbols. */
6587 static void
6588 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6589 struct bfd_link_info *link_info)
6590 {
6591 struct elf32_arm_link_hash_table * globals;
6592
6593 if (link_info == NULL)
6594 /* Ignore this if we are not called by the ELF backend linker. */
6595 return;
6596
6597 globals = elf32_arm_hash_table (link_info);
6598 /* If blx is available then exported Thumb symbols are OK and there is
6599 nothing to do. */
6600 if (globals->use_blx)
6601 return;
6602
6603 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6604 link_info);
6605 }
6606
6607 /* Some relocations map to different relocations depending on the
6608 target. Return the real relocation. */
6609
6610 static int
6611 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6612 int r_type)
6613 {
6614 switch (r_type)
6615 {
6616 case R_ARM_TARGET1:
6617 if (globals->target1_is_rel)
6618 return R_ARM_REL32;
6619 else
6620 return R_ARM_ABS32;
6621
6622 case R_ARM_TARGET2:
6623 return globals->target2_reloc;
6624
6625 default:
6626 return r_type;
6627 }
6628 }
6629
6630 /* Return the base VMA address which should be subtracted from real addresses
6631 when resolving @dtpoff relocations.
6632 This is the PT_TLS segment p_vaddr. */
6633
6634 static bfd_vma
6635 dtpoff_base (struct bfd_link_info *info)
6636 {
6637 /* If tls_sec is NULL, we should have signalled an error already. */
6638 if (elf_hash_table (info)->tls_sec == NULL)
6639 return 0;
6640 return elf_hash_table (info)->tls_sec->vma;
6641 }
6642
6643 /* Return the relocation value for a @tpoff relocation
6644 if the STT_TLS virtual address is ADDRESS. */
6645
6646 static bfd_vma
6647 tpoff (struct bfd_link_info *info, bfd_vma address)
6648 {
6649 struct elf_link_hash_table *htab = elf_hash_table (info);
6650 bfd_vma base;
6651
6652 /* If tls_sec is NULL, we should have signalled an error already. */
6653 if (htab->tls_sec == NULL)
6654 return 0;
6655 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6656 return address - htab->tls_sec->vma + base;
6657 }
6658
6659 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6660 VALUE is the relocation value. */
6661
6662 static bfd_reloc_status_type
6663 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6664 {
6665 if (value > 0xfff)
6666 return bfd_reloc_overflow;
6667
6668 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6669 bfd_put_32 (abfd, value, data);
6670 return bfd_reloc_ok;
6671 }
6672
6673 /* For a given value of n, calculate the value of G_n as required to
6674 deal with group relocations. We return it in the form of an
6675 encoded constant-and-rotation, together with the final residual. If n is
6676 specified as less than zero, then final_residual is filled with the
6677 input value and no further action is performed. */
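/* For example, with VALUE == 0x12345: calling with N == 0 returns 0xb48
   (the constant 0x48 with a rotate-right of 22, i.e. G_0 == 0x12000) and a
   residual of 0x345; calling with N == 1 returns 0xfd1 (G_1 == 0x344) and a
   final residual of 0x1.  */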
6678
6679 static bfd_vma
6680 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6681 {
6682 int current_n;
6683 bfd_vma g_n;
6684 bfd_vma encoded_g_n = 0;
6685 bfd_vma residual = value; /* Also known as Y_n. */
6686
6687 for (current_n = 0; current_n <= n; current_n++)
6688 {
6689 int shift;
6690
6691 /* Calculate which part of the value to mask. */
6692 if (residual == 0)
6693 shift = 0;
6694 else
6695 {
6696 int msb;
6697
6698 /* Determine the most significant bit in the residual and
6699 align the resulting value to a 2-bit boundary. */
6700 for (msb = 30; msb >= 0; msb -= 2)
6701 if (residual & (3 << msb))
6702 break;
6703
6704 /* The desired shift is now (msb - 6), or zero, whichever
6705 is the greater. */
6706 shift = msb - 6;
6707 if (shift < 0)
6708 shift = 0;
6709 }
6710
6711 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6712 g_n = residual & (0xff << shift);
6713 encoded_g_n = (g_n >> shift)
6714 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6715
6716 /* Calculate the residual for the next time around. */
6717 residual &= ~g_n;
6718 }
6719
6720 *final_residual = residual;
6721
6722 return encoded_g_n;
6723 }
6724
6725 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6726 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6727
6728 static int
6729 identify_add_or_sub (bfd_vma insn)
6730 {
6731 int opcode = insn & 0x1e00000;
6732
6733 if (opcode == 1 << 23) /* ADD */
6734 return 1;
6735
6736 if (opcode == 1 << 22) /* SUB */
6737 return -1;
6738
6739 return 0;
6740 }
6741
6742 /* Perform a relocation as part of a final link. */
6743
6744 static bfd_reloc_status_type
6745 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6746 bfd * input_bfd,
6747 bfd * output_bfd,
6748 asection * input_section,
6749 bfd_byte * contents,
6750 Elf_Internal_Rela * rel,
6751 bfd_vma value,
6752 struct bfd_link_info * info,
6753 asection * sym_sec,
6754 const char * sym_name,
6755 int sym_flags,
6756 struct elf_link_hash_entry * h,
6757 bfd_boolean * unresolved_reloc_p,
6758 char ** error_message)
6759 {
6760 unsigned long r_type = howto->type;
6761 unsigned long r_symndx;
6762 bfd_byte * hit_data = contents + rel->r_offset;
6763 bfd * dynobj = NULL;
6764 Elf_Internal_Shdr * symtab_hdr;
6765 struct elf_link_hash_entry ** sym_hashes;
6766 bfd_vma * local_got_offsets;
6767 asection * sgot = NULL;
6768 asection * splt = NULL;
6769 asection * sreloc = NULL;
6770 bfd_vma addend;
6771 bfd_signed_vma signed_addend;
6772 struct elf32_arm_link_hash_table * globals;
6773
6774 globals = elf32_arm_hash_table (info);
6775
6776 BFD_ASSERT (is_arm_elf (input_bfd));
6777
6778 /* Some relocation types map to different relocations depending on the
6779 target. We pick the right one here. */
6780 r_type = arm_real_reloc_type (globals, r_type);
6781 if (r_type != howto->type)
6782 howto = elf32_arm_howto_from_type (r_type);
6783
6784 /* If the start address has been set, then set the EF_ARM_HASENTRY
6785 flag. Setting this more than once is redundant, but the cost is
6786 not too high, and it keeps the code simple.
6787
6788 The test is done here, rather than somewhere else, because the
6789 start address is only set just before the final link commences.
6790
6791 Note - if the user deliberately sets a start address of 0, the
6792 flag will not be set. */
6793 if (bfd_get_start_address (output_bfd) != 0)
6794 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6795
6796 dynobj = elf_hash_table (info)->dynobj;
6797 if (dynobj)
6798 {
6799 sgot = bfd_get_section_by_name (dynobj, ".got");
6800 splt = bfd_get_section_by_name (dynobj, ".plt");
6801 }
6802 symtab_hdr = & elf_symtab_hdr (input_bfd);
6803 sym_hashes = elf_sym_hashes (input_bfd);
6804 local_got_offsets = elf_local_got_offsets (input_bfd);
6805 r_symndx = ELF32_R_SYM (rel->r_info);
6806
6807 if (globals->use_rel)
6808 {
6809 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6810
6811 if (addend & ((howto->src_mask + 1) >> 1))
6812 {
6813 signed_addend = -1;
6814 signed_addend &= ~ howto->src_mask;
6815 signed_addend |= addend;
6816 }
6817 else
6818 signed_addend = addend;
6819 }
6820 else
6821 addend = signed_addend = rel->r_addend;
6822
6823 switch (r_type)
6824 {
6825 case R_ARM_NONE:
6826 /* We don't need to find a value for this symbol. It's just a
6827 marker. */
6828 *unresolved_reloc_p = FALSE;
6829 return bfd_reloc_ok;
6830
6831 case R_ARM_ABS12:
6832 if (!globals->vxworks_p)
6833 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6834
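      /* For VxWorks, fall through.  */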
6835 case R_ARM_PC24:
6836 case R_ARM_ABS32:
6837 case R_ARM_ABS32_NOI:
6838 case R_ARM_REL32:
6839 case R_ARM_REL32_NOI:
6840 case R_ARM_CALL:
6841 case R_ARM_JUMP24:
6842 case R_ARM_XPC25:
6843 case R_ARM_PREL31:
6844 case R_ARM_PLT32:
6845 /* Handle relocations which should use the PLT entry. ABS32/REL32
6846 will use the symbol's value, which may point to a PLT entry, but we
6847 don't need to handle that here. If we created a PLT entry, all
6848 branches in this object should go to it, except if the PLT is too
6849 far away, in which case a long branch stub should be inserted. */
6850 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6851 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6852 && r_type != R_ARM_CALL
6853 && r_type != R_ARM_JUMP24
6854 && r_type != R_ARM_PLT32)
6855 && h != NULL
6856 && splt != NULL
6857 && h->plt.offset != (bfd_vma) -1)
6858 {
6859 /* If we've created a .plt section, and assigned a PLT entry to
6860 this function, it should not be known to bind locally. If
6861 it were, we would have cleared the PLT entry. */
6862 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6863
6864 value = (splt->output_section->vma
6865 + splt->output_offset
6866 + h->plt.offset);
6867 *unresolved_reloc_p = FALSE;
6868 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6869 contents, rel->r_offset, value,
6870 rel->r_addend);
6871 }
6872
6873 /* When generating a shared object or relocatable executable, these
6874 relocations are copied into the output file to be resolved at
6875 run time. */
6876 if ((info->shared || globals->root.is_relocatable_executable)
6877 && (input_section->flags & SEC_ALLOC)
6878 && !(elf32_arm_hash_table (info)->vxworks_p
6879 && strcmp (input_section->output_section->name,
6880 ".tls_vars") == 0)
6881 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6882 || !SYMBOL_CALLS_LOCAL (info, h))
6883 && (h == NULL
6884 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6885 || h->root.type != bfd_link_hash_undefweak)
6886 && r_type != R_ARM_PC24
6887 && r_type != R_ARM_CALL
6888 && r_type != R_ARM_JUMP24
6889 && r_type != R_ARM_PREL31
6890 && r_type != R_ARM_PLT32)
6891 {
6892 Elf_Internal_Rela outrel;
6893 bfd_byte *loc;
6894 bfd_boolean skip, relocate;
6895
6896 *unresolved_reloc_p = FALSE;
6897
6898 if (sreloc == NULL)
6899 {
6900 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6901 ! globals->use_rel);
6902
6903 if (sreloc == NULL)
6904 return bfd_reloc_notsupported;
6905 }
6906
6907 skip = FALSE;
6908 relocate = FALSE;
6909
6910 outrel.r_addend = addend;
6911 outrel.r_offset =
6912 _bfd_elf_section_offset (output_bfd, info, input_section,
6913 rel->r_offset);
6914 if (outrel.r_offset == (bfd_vma) -1)
6915 skip = TRUE;
6916 else if (outrel.r_offset == (bfd_vma) -2)
6917 skip = TRUE, relocate = TRUE;
6918 outrel.r_offset += (input_section->output_section->vma
6919 + input_section->output_offset);
6920
6921 if (skip)
6922 memset (&outrel, 0, sizeof outrel);
6923 else if (h != NULL
6924 && h->dynindx != -1
6925 && (!info->shared
6926 || !info->symbolic
6927 || !h->def_regular))
6928 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6929 else
6930 {
6931 int symbol;
6932
6933 /* This symbol is local, or marked to become local. */
6934 if (sym_flags == STT_ARM_TFUNC)
6935 value |= 1;
6936 if (globals->symbian_p)
6937 {
6938 asection *osec;
6939
6940 /* On Symbian OS, the data segment and text segment
6941 can be relocated independently. Therefore, we
6942 must indicate the segment to which this
6943 relocation is relative. The BPABI allows us to
6944 use any symbol in the right segment; we just use
6945 the section symbol as it is convenient. (We
6946 cannot use the symbol given by "h" directly as it
6947 will not appear in the dynamic symbol table.)
6948
6949 Note that the dynamic linker ignores the section
6950 symbol value, so we don't subtract osec->vma
6951 from the emitted reloc addend. */
6952 if (sym_sec)
6953 osec = sym_sec->output_section;
6954 else
6955 osec = input_section->output_section;
6956 symbol = elf_section_data (osec)->dynindx;
6957 if (symbol == 0)
6958 {
6959 struct elf_link_hash_table *htab = elf_hash_table (info);
6960
6961 if ((osec->flags & SEC_READONLY) == 0
6962 && htab->data_index_section != NULL)
6963 osec = htab->data_index_section;
6964 else
6965 osec = htab->text_index_section;
6966 symbol = elf_section_data (osec)->dynindx;
6967 }
6968 BFD_ASSERT (symbol != 0);
6969 }
6970 else
6971 /* On SVR4-ish systems, the dynamic loader cannot
6972 relocate the text and data segments independently,
6973 so the symbol does not matter. */
6974 symbol = 0;
6975 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6976 if (globals->use_rel)
6977 relocate = TRUE;
6978 else
6979 outrel.r_addend += value;
6980 }
6981
6982 loc = sreloc->contents;
6983 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6984 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6985
6986 /* If this reloc is against an external symbol, we do not want to
6987 fiddle with the addend. Otherwise, we need to include the symbol
6988 value so that it becomes an addend for the dynamic reloc. */
6989 if (! relocate)
6990 return bfd_reloc_ok;
6991
6992 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6993 contents, rel->r_offset, value,
6994 (bfd_vma) 0);
6995 }
6996 else switch (r_type)
6997 {
6998 case R_ARM_ABS12:
6999 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7000
7001 case R_ARM_XPC25: /* Arm BLX instruction. */
7002 case R_ARM_CALL:
7003 case R_ARM_JUMP24:
7004 case R_ARM_PC24: /* Arm B/BL instruction. */
7005 case R_ARM_PLT32:
7006 {
7007 bfd_signed_vma branch_offset;
7008 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7009
7010 if (r_type == R_ARM_XPC25)
7011 {
7012 /* Check for Arm calling Arm function. */
7013 /* FIXME: Should we translate the instruction into a BL
7014 instruction instead?  */
7015 if (sym_flags != STT_ARM_TFUNC)
7016 (*_bfd_error_handler)
7017 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7018 input_bfd,
7019 h ? h->root.root.string : "(local)");
7020 }
7021 else if (r_type == R_ARM_PC24)
7022 {
7023 /* Check for Arm calling Thumb function. */
7024 if (sym_flags == STT_ARM_TFUNC)
7025 {
7026 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7027 output_bfd, input_section,
7028 hit_data, sym_sec, rel->r_offset,
7029 signed_addend, value,
7030 error_message))
7031 return bfd_reloc_ok;
7032 else
7033 return bfd_reloc_dangerous;
7034 }
7035 }
7036
7037 /* Check if a stub has to be inserted because the
7038 destination is too far or we are changing mode. */
7039 if ( r_type == R_ARM_CALL
7040 || r_type == R_ARM_JUMP24
7041 || r_type == R_ARM_PLT32)
7042 {
7043 bfd_vma from;
7044
7045 /* If the call goes through a PLT entry, make sure to
7046 check distance to the right destination address. */
7047 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7048 {
7049 value = (splt->output_section->vma
7050 + splt->output_offset
7051 + h->plt.offset);
7052 *unresolved_reloc_p = FALSE;
7053 /* The PLT entry is in ARM mode, regardless of the
7054 target function. */
7055 sym_flags = STT_FUNC;
7056 }
7057
7058 from = (input_section->output_section->vma
7059 + input_section->output_offset
7060 + rel->r_offset);
7061 branch_offset = (bfd_signed_vma)(value - from);
7062
7063 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7064 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7065 || ((sym_flags == STT_ARM_TFUNC)
7066 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7067 || (r_type == R_ARM_JUMP24)
7068 || (r_type == R_ARM_PLT32) ))
7069 )
7070 {
7071 /* The target is out of reach, so redirect the
7072 branch to the local stub for this function. */
7073
7074 stub_entry = elf32_arm_get_stub_entry (input_section,
7075 sym_sec, h,
7076 rel, globals);
7077 if (stub_entry != NULL)
7078 value = (stub_entry->stub_offset
7079 + stub_entry->stub_sec->output_offset
7080 + stub_entry->stub_sec->output_section->vma);
7081 }
7082 }
7083
7084 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7085 where:
7086 S is the address of the symbol in the relocation.
7087 P is address of the instruction being relocated.
7088 A is the addend (extracted from the instruction) in bytes.
7089
7090 S is held in 'value'.
7091 P is the base address of the section containing the
7092 instruction plus the offset of the reloc into that
7093 section, ie:
7094 (input_section->output_section->vma +
7095 input_section->output_offset +
7096 rel->r_offset).
7097 A is the addend, converted into bytes, ie:
7098 (signed_addend * 4)
7099
7100 Note: None of these operations have knowledge of the pipeline
7101 size of the processor, thus it is up to the assembler to
7102 encode this information into the addend. */
7103 value -= (input_section->output_section->vma
7104 + input_section->output_offset);
7105 value -= rel->r_offset;
7106 if (globals->use_rel)
7107 value += (signed_addend << howto->size);
7108 else
7109 /* RELA addends do not have to be adjusted by howto->size. */
7110 value += signed_addend;
7111
7112 signed_addend = value;
7113 signed_addend >>= howto->rightshift;
7114
7115 /* A branch to an undefined weak symbol is turned into a jump to
7116 the next instruction unless a PLT entry will be created.
7117 Do the same for local undefined symbols.
7118 The jump to the next instruction is optimized as a NOP depending
7119 on the architecture. */
7120 if (h ? (h->root.type == bfd_link_hash_undefweak
7121 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7122 : bfd_is_und_section (sym_sec))
7123 {
7124 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7125
7126 if (arch_has_arm_nop (globals))
7127 value |= 0x0320f000;
7128 else
7129 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7130 }
7131 else
7132 {
7133 /* Perform a signed range check. */
7134 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7135 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7136 return bfd_reloc_overflow;
7137
7138 addend = (value & 2);
7139
7140 value = (signed_addend & howto->dst_mask)
7141 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7142
7143 if (r_type == R_ARM_CALL)
7144 {
7145 /* Set the H bit in the BLX instruction. */
7146 if (sym_flags == STT_ARM_TFUNC)
7147 {
7148 if (addend)
7149 value |= (1 << 24);
7150 else
7151 value &= ~(bfd_vma)(1 << 24);
7152 }
7153
7154 /* Select the correct instruction (BL or BLX). */
7155 /* Only if we are not handling a BL to a stub. In this
7156 case, mode switching is performed by the stub. */
7157 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7158 value |= (1 << 28);
7159 else
7160 {
7161 value &= ~(bfd_vma)(1 << 28);
7162 value |= (1 << 24);
7163 }
7164 }
7165 }
7166 }
7167 break;
7168
7169 case R_ARM_ABS32:
7170 value += addend;
7171 if (sym_flags == STT_ARM_TFUNC)
7172 value |= 1;
7173 break;
7174
7175 case R_ARM_ABS32_NOI:
7176 value += addend;
7177 break;
7178
7179 case R_ARM_REL32:
7180 value += addend;
7181 if (sym_flags == STT_ARM_TFUNC)
7182 value |= 1;
7183 value -= (input_section->output_section->vma
7184 + input_section->output_offset + rel->r_offset);
7185 break;
7186
7187 case R_ARM_REL32_NOI:
7188 value += addend;
7189 value -= (input_section->output_section->vma
7190 + input_section->output_offset + rel->r_offset);
7191 break;
7192
7193 case R_ARM_PREL31:
7194 value -= (input_section->output_section->vma
7195 + input_section->output_offset + rel->r_offset);
7196 value += signed_addend;
7197 if (! h || h->root.type != bfd_link_hash_undefweak)
7198 {
7199 /* Check for overflow. */
7200 if ((value ^ (value >> 1)) & (1 << 30))
7201 return bfd_reloc_overflow;
7202 }
7203 value &= 0x7fffffff;
7204 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7205 if (sym_flags == STT_ARM_TFUNC)
7206 value |= 1;
7207 break;
7208 }
7209
7210 bfd_put_32 (input_bfd, value, hit_data);
7211 return bfd_reloc_ok;
7212
7213 case R_ARM_ABS8:
7214 value += addend;
7215 if ((long) value > 0x7f || (long) value < -0x80)
7216 return bfd_reloc_overflow;
7217
7218 bfd_put_8 (input_bfd, value, hit_data);
7219 return bfd_reloc_ok;
7220
7221 case R_ARM_ABS16:
7222 value += addend;
7223
7224 if ((long) value > 0x7fff || (long) value < -0x8000)
7225 return bfd_reloc_overflow;
7226
7227 bfd_put_16 (input_bfd, value, hit_data);
7228 return bfd_reloc_ok;
7229
7230 case R_ARM_THM_ABS5:
7231 /* Support ldr and str instructions for the thumb. */
7232 if (globals->use_rel)
7233 {
7234 /* Need to refetch addend. */
7235 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7236 /* ??? Need to determine shift amount from operand size. */
7237 addend >>= howto->rightshift;
7238 }
7239 value += addend;
7240
7241 /* ??? Isn't value unsigned? */
7242 if ((long) value > 0x1f || (long) value < -0x10)
7243 return bfd_reloc_overflow;
7244
7245 /* ??? Value needs to be properly shifted into place first. */
7246 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7247 bfd_put_16 (input_bfd, value, hit_data);
7248 return bfd_reloc_ok;
7249
7250 case R_ARM_THM_ALU_PREL_11_0:
7251 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
7252 {
7253 bfd_vma insn;
7254 bfd_signed_vma relocation;
7255
7256 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7257 | bfd_get_16 (input_bfd, hit_data + 2);
7258
7259 if (globals->use_rel)
7260 {
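            /* The 12-bit immediate of ADDW/SUBW is split across the
               encoding as i (bit 26), imm3 (bits 14-12) and imm8
               (bits 7-0); a non-zero opcode field in bits 23-20
               indicates SUBW, i.e. a negative offset.  */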
7261 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7262 | ((insn & (1 << 26)) >> 15);
7263 if (insn & 0xf00000)
7264 signed_addend = -signed_addend;
7265 }
7266
7267 relocation = value + signed_addend;
7268 relocation -= (input_section->output_section->vma
7269 + input_section->output_offset
7270 + rel->r_offset);
7271
7272 value = abs (relocation);
7273
7274 if (value >= 0x1000)
7275 return bfd_reloc_overflow;
7276
7277 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7278 | ((value & 0x700) << 4)
7279 | ((value & 0x800) << 15);
7280 if (relocation < 0)
7281 insn |= 0xa00000;
7282
7283 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7284 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7285
7286 return bfd_reloc_ok;
7287 }
7288
7289 case R_ARM_THM_PC8:
7290 /* PR 10073: This reloc is not generated by the GNU toolchain,
7291 but it is supported for compatibility with third party libraries
7292        generated by other compilers, specifically the ARM and IAR compilers.  */
7293 {
7294 bfd_vma insn;
7295 bfd_signed_vma relocation;
7296
7297 insn = bfd_get_16 (input_bfd, hit_data);
7298
7299 if (globals->use_rel)
7300 addend = (insn & 0x00ff) << 2;
7301
7302 relocation = value + addend;
7303 relocation -= (input_section->output_section->vma
7304 + input_section->output_offset
7305 + rel->r_offset);
7306
7307 value = abs (relocation);
7308
7309 /* We do not check for overflow of this reloc. Although strictly
7310 speaking this is incorrect, it appears to be necessary in order
7311 to work with IAR generated relocs. Since GCC and GAS do not
7312 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7313 a problem for them. */
7314 value &= 0x3fc;
7315
7316 insn = (insn & 0xff00) | (value >> 2);
7317
7318 bfd_put_16 (input_bfd, insn, hit_data);
7319
7320 return bfd_reloc_ok;
7321 }
7322
7323 case R_ARM_THM_PC12:
7324 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7325 {
7326 bfd_vma insn;
7327 bfd_signed_vma relocation;
7328
7329 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7330 | bfd_get_16 (input_bfd, hit_data + 2);
7331
7332 if (globals->use_rel)
7333 {
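            /* The low twelve bits of the instruction hold the offset;
               bit 23 of the combined encoding is the U bit, which is set
               when the offset is added to (rather than subtracted from)
               the base.  */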
7334 signed_addend = insn & 0xfff;
7335 if (!(insn & (1 << 23)))
7336 signed_addend = -signed_addend;
7337 }
7338
7339 relocation = value + signed_addend;
7340 relocation -= (input_section->output_section->vma
7341 + input_section->output_offset
7342 + rel->r_offset);
7343
7344 value = abs (relocation);
7345
7346 if (value >= 0x1000)
7347 return bfd_reloc_overflow;
7348
7349 insn = (insn & 0xff7ff000) | value;
7350 if (relocation >= 0)
7351 insn |= (1 << 23);
7352
7353 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7354 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7355
7356 return bfd_reloc_ok;
7357 }
7358
7359 case R_ARM_THM_XPC22:
7360 case R_ARM_THM_CALL:
7361 case R_ARM_THM_JUMP24:
7362 /* Thumb BL (branch long instruction). */
7363 {
7364 bfd_vma relocation;
7365 bfd_vma reloc_sign;
7366 bfd_boolean overflow = FALSE;
7367 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7368 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7369 bfd_signed_vma reloc_signed_max;
7370 bfd_signed_vma reloc_signed_min;
7371 bfd_vma check;
7372 bfd_signed_vma signed_check;
7373 int bitsize;
7374 const int thumb2 = using_thumb2 (globals);
7375
7376 /* A branch to an undefined weak symbol is turned into a jump to
7377 the next instruction unless a PLT entry will be created.
7378 The jump to the next instruction is optimized as a NOP.W for
7379 Thumb-2 enabled architectures. */
7380 if (h && h->root.type == bfd_link_hash_undefweak
7381 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7382 {
7383 if (arch_has_thumb2_nop (globals))
7384 {
7385 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7386 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7387 }
7388 else
7389 {
7390 bfd_put_16 (input_bfd, 0xe000, hit_data);
7391 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7392 }
7393 return bfd_reloc_ok;
7394 }
7395
7396 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7397 with Thumb-1) involving the J1 and J2 bits. */
7398 if (globals->use_rel)
7399 {
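            /* The branch offset is encoded as S:I1:I2:imm10:imm11:'0',
               where I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S).  A
               Thumb-1 BL always has J1 = J2 = 1, which makes I1 and I2
               copies of the sign bit, as required.  */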
7400 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7401 bfd_vma upper = upper_insn & 0x3ff;
7402 bfd_vma lower = lower_insn & 0x7ff;
7403 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7404 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7405 bfd_vma i1 = j1 ^ s ? 0 : 1;
7406 bfd_vma i2 = j2 ^ s ? 0 : 1;
7407
7408 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7409 /* Sign extend. */
7410 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7411
7412 signed_addend = addend;
7413 }
7414
7415 if (r_type == R_ARM_THM_XPC22)
7416 {
7417 /* Check for Thumb to Thumb call. */
7418 /* FIXME: Should we translate the instruction into a BL
7419                  instruction instead?  */
7420 if (sym_flags == STT_ARM_TFUNC)
7421 (*_bfd_error_handler)
7422 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7423 input_bfd,
7424 h ? h->root.root.string : "(local)");
7425 }
7426 else
7427 {
7428 /* If it is not a call to Thumb, assume call to Arm.
7429 If it is a call relative to a section name, then it is not a
7430 function call at all, but rather a long jump. Calls through
7431 the PLT do not require stubs. */
7432 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7433 && (h == NULL || splt == NULL
7434 || h->plt.offset == (bfd_vma) -1))
7435 {
7436 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7437 {
7438 /* Convert BL to BLX. */
7439 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7440 }
7441 else if (( r_type != R_ARM_THM_CALL)
7442 && (r_type != R_ARM_THM_JUMP24))
7443 {
7444 if (elf32_thumb_to_arm_stub
7445 (info, sym_name, input_bfd, output_bfd, input_section,
7446 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7447 error_message))
7448 return bfd_reloc_ok;
7449 else
7450 return bfd_reloc_dangerous;
7451 }
7452 }
7453 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7454 && r_type == R_ARM_THM_CALL)
7455 {
7456 /* Make sure this is a BL. */
7457 lower_insn |= 0x1800;
7458 }
7459 }
7460
7461 /* Handle calls via the PLT. */
7462 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7463 {
7464 value = (splt->output_section->vma
7465 + splt->output_offset
7466 + h->plt.offset);
7467 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7468 {
7469 /* If the Thumb BLX instruction is available, convert the
7470 BL to a BLX instruction to call the ARM-mode PLT entry. */
7471 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7472 sym_flags = STT_FUNC;
7473 }
7474 else
7475 {
7476 /* Target the Thumb stub before the ARM PLT entry. */
7477 value -= PLT_THUMB_STUB_SIZE;
7478 sym_flags = STT_ARM_TFUNC;
7479 }
7480 *unresolved_reloc_p = FALSE;
7481 }
7482
7483 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7484 {
7485 /* Check if a stub has to be inserted because the destination
7486 is too far. */
7487 bfd_vma from;
7488 bfd_signed_vma branch_offset;
7489 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7490
7491 from = (input_section->output_section->vma
7492 + input_section->output_offset
7493 + rel->r_offset);
7494 branch_offset = (bfd_signed_vma)(value - from);
7495
7496 if ((!thumb2
7497 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7498 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7499 ||
7500 (thumb2
7501 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7502 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7503 || ((sym_flags != STT_ARM_TFUNC)
7504 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7505 || r_type == R_ARM_THM_JUMP24)))
7506 {
7507 /* The target is out of reach or we are changing modes, so
7508 redirect the branch to the local stub for this
7509 function. */
7510 stub_entry = elf32_arm_get_stub_entry (input_section,
7511 sym_sec, h,
7512 rel, globals);
7513 if (stub_entry != NULL)
7514 value = (stub_entry->stub_offset
7515 + stub_entry->stub_sec->output_offset
7516 + stub_entry->stub_sec->output_section->vma);
7517
7518 /* If this call becomes a call to Arm, force BLX. */
7519 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7520 {
7521 if ((stub_entry
7522 && !arm_stub_is_thumb (stub_entry->stub_type))
7523 || (sym_flags != STT_ARM_TFUNC))
7524 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7525 }
7526 }
7527 }
7528
7529 relocation = value + signed_addend;
7530
7531 relocation -= (input_section->output_section->vma
7532 + input_section->output_offset
7533 + rel->r_offset);
7534
7535 check = relocation >> howto->rightshift;
7536
7537 /* If this is a signed value, the rightshift just dropped
7538          leading 1 bits (assuming two's complement).  */
7539 if ((bfd_signed_vma) relocation >= 0)
7540 signed_check = check;
7541 else
7542 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7543
7544          /* Calculate the permissible maximum and minimum values for
7545 this relocation according to whether we're relocating for
7546 Thumb-2 or not. */
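          /* A Thumb-2 BL/BLX or B.W has a branch range of roughly
             +/-16MB; a Thumb-1 BL only roughly +/-4MB, hence two fewer
             offset bits are permitted below.  */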
7547 bitsize = howto->bitsize;
7548 if (!thumb2)
7549 bitsize -= 2;
7550 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7551 reloc_signed_min = ~reloc_signed_max;
7552
7553 /* Assumes two's complement. */
7554 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7555 overflow = TRUE;
7556
7557 if ((lower_insn & 0x5000) == 0x4000)
7558 /* For a BLX instruction, make sure that the relocation is rounded up
7559 to a word boundary. This follows the semantics of the instruction
7560 which specifies that bit 1 of the target address will come from bit
7561 1 of the base address. */
7562 relocation = (relocation + 2) & ~ 3;
7563
7564 /* Put RELOCATION back into the insn. Assumes two's complement.
7565 We use the Thumb-2 encoding, which is safe even if dealing with
7566 a Thumb-1 instruction by virtue of our overflow check above. */
7567 reloc_sign = (signed_check < 0) ? 1 : 0;
7568 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7569 | ((relocation >> 12) & 0x3ff)
7570 | (reloc_sign << 10);
7571 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7572 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7573 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7574 | ((relocation >> 1) & 0x7ff);
7575
7576 /* Put the relocated value back in the object file: */
7577 bfd_put_16 (input_bfd, upper_insn, hit_data);
7578 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7579
7580 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7581 }
7582 break;
7583
7584 case R_ARM_THM_JUMP19:
7585 /* Thumb32 conditional branch instruction. */
7586 {
7587 bfd_vma relocation;
7588 bfd_boolean overflow = FALSE;
7589 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7590 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7591 bfd_signed_vma reloc_signed_max = 0xffffe;
7592 bfd_signed_vma reloc_signed_min = -0x100000;
7593 bfd_signed_vma signed_check;
7594
7595           /* Need to refetch the addend, reconstruct the top three bits,
7596              and combine the upper and lower immediate fields.  */
7597 if (globals->use_rel)
7598 {
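            /* The conditional branch offset is encoded as
               S:J2:J1:imm6:imm11:'0' and sign-extended.  */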
7599 bfd_vma S = (upper_insn & 0x0400) >> 10;
7600 bfd_vma upper = (upper_insn & 0x003f);
7601 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7602 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7603 bfd_vma lower = (lower_insn & 0x07ff);
7604
7605 upper |= J1 << 6;
7606 upper |= J2 << 7;
7607 upper |= (!S) << 8;
7608 upper -= 0x0100; /* Sign extend. */
7609
7610 addend = (upper << 12) | (lower << 1);
7611 signed_addend = addend;
7612 }
7613
7614 /* Handle calls via the PLT. */
7615 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7616 {
7617 value = (splt->output_section->vma
7618 + splt->output_offset
7619 + h->plt.offset);
7620 /* Target the Thumb stub before the ARM PLT entry. */
7621 value -= PLT_THUMB_STUB_SIZE;
7622 *unresolved_reloc_p = FALSE;
7623 }
7624
7625 /* ??? Should handle interworking? GCC might someday try to
7626 use this for tail calls. */
7627
7628 relocation = value + signed_addend;
7629 relocation -= (input_section->output_section->vma
7630 + input_section->output_offset
7631 + rel->r_offset);
7632 signed_check = (bfd_signed_vma) relocation;
7633
7634 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7635 overflow = TRUE;
7636
7637 /* Put RELOCATION back into the insn. */
7638 {
7639 bfd_vma S = (relocation & 0x00100000) >> 20;
7640 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7641 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7642 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7643 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7644
7645 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7646 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7647 }
7648
7649 /* Put the relocated value back in the object file: */
7650 bfd_put_16 (input_bfd, upper_insn, hit_data);
7651 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7652
7653 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7654 }
7655
7656 case R_ARM_THM_JUMP11:
7657 case R_ARM_THM_JUMP8:
7658 case R_ARM_THM_JUMP6:
7659       /* Thumb B (branch) instruction.  */
7660 {
7661 bfd_signed_vma relocation;
7662 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7663 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7664 bfd_signed_vma signed_check;
7665
7666         /* CBZ/CBNZ cannot jump backward.  */
7667 if (r_type == R_ARM_THM_JUMP6)
7668 reloc_signed_min = 0;
7669
7670 if (globals->use_rel)
7671 {
7672 /* Need to refetch addend. */
7673 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7674 if (addend & ((howto->src_mask + 1) >> 1))
7675 {
7676 signed_addend = -1;
7677 signed_addend &= ~ howto->src_mask;
7678 signed_addend |= addend;
7679 }
7680 else
7681 signed_addend = addend;
7682 /* The value in the insn has been right shifted. We need to
7683 undo this, so that we can perform the address calculation
7684 in terms of bytes. */
7685 signed_addend <<= howto->rightshift;
7686 }
7687 relocation = value + signed_addend;
7688
7689 relocation -= (input_section->output_section->vma
7690 + input_section->output_offset
7691 + rel->r_offset);
7692
7693 relocation >>= howto->rightshift;
7694 signed_check = relocation;
7695
7696 if (r_type == R_ARM_THM_JUMP6)
7697 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7698 else
7699 relocation &= howto->dst_mask;
7700 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7701
7702 bfd_put_16 (input_bfd, relocation, hit_data);
7703
7704 /* Assumes two's complement. */
7705 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7706 return bfd_reloc_overflow;
7707
7708 return bfd_reloc_ok;
7709 }
7710
7711 case R_ARM_ALU_PCREL7_0:
7712 case R_ARM_ALU_PCREL15_8:
7713 case R_ARM_ALU_PCREL23_15:
7714 {
7715 bfd_vma insn;
7716 bfd_vma relocation;
7717
7718 insn = bfd_get_32 (input_bfd, hit_data);
7719 if (globals->use_rel)
7720 {
7721 /* Extract the addend. */
7722 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7723 signed_addend = addend;
7724 }
7725 relocation = value + signed_addend;
7726
7727 relocation -= (input_section->output_section->vma
7728 + input_section->output_offset
7729 + rel->r_offset);
7730 insn = (insn & ~0xfff)
7731 | ((howto->bitpos << 7) & 0xf00)
7732 | ((relocation >> howto->bitpos) & 0xff);
7733           bfd_put_32 (input_bfd, insn, hit_data);
7734 }
7735 return bfd_reloc_ok;
7736
7737 case R_ARM_GNU_VTINHERIT:
7738 case R_ARM_GNU_VTENTRY:
7739 return bfd_reloc_ok;
7740
7741 case R_ARM_GOTOFF32:
7742 /* Relocation is relative to the start of the
7743 global offset table. */
7744
7745 BFD_ASSERT (sgot != NULL);
7746 if (sgot == NULL)
7747 return bfd_reloc_notsupported;
7748
7749 /* If we are addressing a Thumb function, we need to adjust the
7750 address by one, so that attempts to call the function pointer will
7751 correctly interpret it as Thumb code. */
7752 if (sym_flags == STT_ARM_TFUNC)
7753 value += 1;
7754
7755 /* Note that sgot->output_offset is not involved in this
7756 calculation. We always want the start of .got. If we
7757 define _GLOBAL_OFFSET_TABLE in a different way, as is
7758 permitted by the ABI, we might have to change this
7759 calculation. */
7760 value -= sgot->output_section->vma;
7761 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7762 contents, rel->r_offset, value,
7763 rel->r_addend);
7764
7765 case R_ARM_GOTPC:
7766 /* Use global offset table as symbol value. */
7767 BFD_ASSERT (sgot != NULL);
7768
7769 if (sgot == NULL)
7770 return bfd_reloc_notsupported;
7771
7772 *unresolved_reloc_p = FALSE;
7773 value = sgot->output_section->vma;
7774 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7775 contents, rel->r_offset, value,
7776 rel->r_addend);
7777
7778 case R_ARM_GOT32:
7779 case R_ARM_GOT_PREL:
7780 /* Relocation is to the entry for this symbol in the
7781 global offset table. */
7782 if (sgot == NULL)
7783 return bfd_reloc_notsupported;
7784
7785 if (h != NULL)
7786 {
7787 bfd_vma off;
7788 bfd_boolean dyn;
7789
7790 off = h->got.offset;
7791 BFD_ASSERT (off != (bfd_vma) -1);
7792 dyn = globals->root.dynamic_sections_created;
7793
7794 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7795 || (info->shared
7796 && SYMBOL_REFERENCES_LOCAL (info, h))
7797 || (ELF_ST_VISIBILITY (h->other)
7798 && h->root.type == bfd_link_hash_undefweak))
7799 {
7800 /* This is actually a static link, or it is a -Bsymbolic link
7801 and the symbol is defined locally. We must initialize this
7802 entry in the global offset table. Since the offset must
7803 always be a multiple of 4, we use the least significant bit
7804 to record whether we have initialized it already.
7805
7806 When doing a dynamic link, we create a .rel(a).got relocation
7807 entry to initialize the value. This is done in the
7808 finish_dynamic_symbol routine. */
7809 if ((off & 1) != 0)
7810 off &= ~1;
7811 else
7812 {
7813 /* If we are addressing a Thumb function, we need to
7814 adjust the address by one, so that attempts to
7815 call the function pointer will correctly
7816 interpret it as Thumb code. */
7817 if (sym_flags == STT_ARM_TFUNC)
7818 value |= 1;
7819
7820 bfd_put_32 (output_bfd, value, sgot->contents + off);
7821 h->got.offset |= 1;
7822 }
7823 }
7824 else
7825 *unresolved_reloc_p = FALSE;
7826
7827 value = sgot->output_offset + off;
7828 }
7829 else
7830 {
7831 bfd_vma off;
7832
7833 BFD_ASSERT (local_got_offsets != NULL &&
7834 local_got_offsets[r_symndx] != (bfd_vma) -1);
7835
7836 off = local_got_offsets[r_symndx];
7837
7838 /* The offset must always be a multiple of 4. We use the
7839 least significant bit to record whether we have already
7840 generated the necessary reloc. */
7841 if ((off & 1) != 0)
7842 off &= ~1;
7843 else
7844 {
7845 /* If we are addressing a Thumb function, we need to
7846 adjust the address by one, so that attempts to
7847 call the function pointer will correctly
7848 interpret it as Thumb code. */
7849 if (sym_flags == STT_ARM_TFUNC)
7850 value |= 1;
7851
7852 if (globals->use_rel)
7853 bfd_put_32 (output_bfd, value, sgot->contents + off);
7854
7855 if (info->shared)
7856 {
7857 asection * srelgot;
7858 Elf_Internal_Rela outrel;
7859 bfd_byte *loc;
7860
7861 srelgot = (bfd_get_section_by_name
7862 (dynobj, RELOC_SECTION (globals, ".got")));
7863 BFD_ASSERT (srelgot != NULL);
7864
7865 outrel.r_addend = addend + value;
7866 outrel.r_offset = (sgot->output_section->vma
7867 + sgot->output_offset
7868 + off);
7869 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7870 loc = srelgot->contents;
7871 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7872 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7873 }
7874
7875 local_got_offsets[r_symndx] |= 1;
7876 }
7877
7878 value = sgot->output_offset + off;
7879 }
7880 if (r_type != R_ARM_GOT32)
7881 value += sgot->output_section->vma;
7882
7883 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7884 contents, rel->r_offset, value,
7885 rel->r_addend);
7886
7887 case R_ARM_TLS_LDO32:
7888 value = value - dtpoff_base (info);
7889
7890 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7891 contents, rel->r_offset, value,
7892 rel->r_addend);
7893
7894 case R_ARM_TLS_LDM32:
7895 {
7896 bfd_vma off;
7897
7898 if (globals->sgot == NULL)
7899 abort ();
7900
7901 off = globals->tls_ldm_got.offset;
7902
7903 if ((off & 1) != 0)
7904 off &= ~1;
7905 else
7906 {
7907 /* If we don't know the module number, create a relocation
7908 for it. */
7909 if (info->shared)
7910 {
7911 Elf_Internal_Rela outrel;
7912 bfd_byte *loc;
7913
7914 if (globals->srelgot == NULL)
7915 abort ();
7916
7917 outrel.r_addend = 0;
7918 outrel.r_offset = (globals->sgot->output_section->vma
7919 + globals->sgot->output_offset + off);
7920 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7921
7922 if (globals->use_rel)
7923 bfd_put_32 (output_bfd, outrel.r_addend,
7924 globals->sgot->contents + off);
7925
7926 loc = globals->srelgot->contents;
7927 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7928 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7929 }
7930 else
7931 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7932
7933 globals->tls_ldm_got.offset |= 1;
7934 }
7935
7936 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7937 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7938
7939 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7940 contents, rel->r_offset, value,
7941 rel->r_addend);
7942 }
7943
7944 case R_ARM_TLS_GD32:
7945 case R_ARM_TLS_IE32:
7946 {
7947 bfd_vma off;
7948 int indx;
7949 char tls_type;
7950
7951 if (globals->sgot == NULL)
7952 abort ();
7953
7954 indx = 0;
7955 if (h != NULL)
7956 {
7957 bfd_boolean dyn;
7958 dyn = globals->root.dynamic_sections_created;
7959 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7960 && (!info->shared
7961 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7962 {
7963 *unresolved_reloc_p = FALSE;
7964 indx = h->dynindx;
7965 }
7966 off = h->got.offset;
7967 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7968 }
7969 else
7970 {
7971 if (local_got_offsets == NULL)
7972 abort ();
7973 off = local_got_offsets[r_symndx];
7974 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7975 }
7976
7977 if (tls_type == GOT_UNKNOWN)
7978 abort ();
7979
7980 if ((off & 1) != 0)
7981 off &= ~1;
7982 else
7983 {
7984 bfd_boolean need_relocs = FALSE;
7985 Elf_Internal_Rela outrel;
7986 bfd_byte *loc = NULL;
7987 int cur_off = off;
7988
7989 /* The GOT entries have not been initialized yet. Do it
7990 now, and emit any relocations. If both an IE GOT and a
7991 GD GOT are necessary, we emit the GD first. */
7992
7993 if ((info->shared || indx != 0)
7994 && (h == NULL
7995 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7996 || h->root.type != bfd_link_hash_undefweak))
7997 {
7998 need_relocs = TRUE;
7999 if (globals->srelgot == NULL)
8000 abort ();
8001 loc = globals->srelgot->contents;
8002 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8003 }
8004
8005 if (tls_type & GOT_TLS_GD)
8006 {
8007 if (need_relocs)
8008 {
8009 outrel.r_addend = 0;
8010 outrel.r_offset = (globals->sgot->output_section->vma
8011 + globals->sgot->output_offset
8012 + cur_off);
8013 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8014
8015 if (globals->use_rel)
8016 bfd_put_32 (output_bfd, outrel.r_addend,
8017 globals->sgot->contents + cur_off);
8018
8019 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8020 globals->srelgot->reloc_count++;
8021 loc += RELOC_SIZE (globals);
8022
8023 if (indx == 0)
8024 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8025 globals->sgot->contents + cur_off + 4);
8026 else
8027 {
8028 outrel.r_addend = 0;
8029 outrel.r_info = ELF32_R_INFO (indx,
8030 R_ARM_TLS_DTPOFF32);
8031 outrel.r_offset += 4;
8032
8033 if (globals->use_rel)
8034 bfd_put_32 (output_bfd, outrel.r_addend,
8035 globals->sgot->contents + cur_off + 4);
8036
8037
8038 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8039 globals->srelgot->reloc_count++;
8040 loc += RELOC_SIZE (globals);
8041 }
8042 }
8043 else
8044 {
8045 /* If we are not emitting relocations for a
8046 general dynamic reference, then we must be in a
8047 static link or an executable link with the
8048 symbol binding locally. Mark it as belonging
8049 to module 1, the executable. */
8050 bfd_put_32 (output_bfd, 1,
8051 globals->sgot->contents + cur_off);
8052 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8053 globals->sgot->contents + cur_off + 4);
8054 }
8055
8056 cur_off += 8;
8057 }
8058
8059 if (tls_type & GOT_TLS_IE)
8060 {
8061 if (need_relocs)
8062 {
8063 if (indx == 0)
8064 outrel.r_addend = value - dtpoff_base (info);
8065 else
8066 outrel.r_addend = 0;
8067 outrel.r_offset = (globals->sgot->output_section->vma
8068 + globals->sgot->output_offset
8069 + cur_off);
8070 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8071
8072 if (globals->use_rel)
8073 bfd_put_32 (output_bfd, outrel.r_addend,
8074 globals->sgot->contents + cur_off);
8075
8076 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8077 globals->srelgot->reloc_count++;
8078 loc += RELOC_SIZE (globals);
8079 }
8080 else
8081 bfd_put_32 (output_bfd, tpoff (info, value),
8082 globals->sgot->contents + cur_off);
8083 cur_off += 4;
8084 }
8085
8086 if (h != NULL)
8087 h->got.offset |= 1;
8088 else
8089 local_got_offsets[r_symndx] |= 1;
8090 }
8091
8092 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8093 off += 8;
8094 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8095 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8096
8097 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8098 contents, rel->r_offset, value,
8099 rel->r_addend);
8100 }
8101
8102 case R_ARM_TLS_LE32:
8103 if (info->shared)
8104 {
8105 (*_bfd_error_handler)
8106 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8107 input_bfd, input_section,
8108 (long) rel->r_offset, howto->name);
8109 return (bfd_reloc_status_type) FALSE;
8110 }
8111 else
8112 value = tpoff (info, value);
8113
8114 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8115 contents, rel->r_offset, value,
8116 rel->r_addend);
8117
8118 case R_ARM_V4BX:
8119 if (globals->fix_v4bx)
8120 {
8121 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8122
8123 /* Ensure that we have a BX instruction. */
8124 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8125
8126 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8127 {
8128 /* Branch to veneer. */
8129 bfd_vma glue_addr;
8130 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8131 glue_addr -= input_section->output_section->vma
8132 + input_section->output_offset
8133 + rel->r_offset + 8;
8134 insn = (insn & 0xf0000000) | 0x0a000000
8135 | ((glue_addr >> 2) & 0x00ffffff);
8136 }
8137 else
8138 {
8139 /* Preserve Rm (lowest four bits) and the condition code
8140 (highest four bits). Other bits encode MOV PC,Rm. */
8141 insn = (insn & 0xf000000f) | 0x01a0f000;
8142 }
8143
8144 bfd_put_32 (input_bfd, insn, hit_data);
8145 }
8146 return bfd_reloc_ok;
8147
8148 case R_ARM_MOVW_ABS_NC:
8149 case R_ARM_MOVT_ABS:
8150 case R_ARM_MOVW_PREL_NC:
8151 case R_ARM_MOVT_PREL:
8152 /* Until we properly support segment-base-relative addressing then
8153 we assume the segment base to be zero, as for the group relocations.
8154 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8155 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8156 case R_ARM_MOVW_BREL_NC:
8157 case R_ARM_MOVW_BREL:
8158 case R_ARM_MOVT_BREL:
8159 {
8160 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8161
8162 if (globals->use_rel)
8163 {
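          /* The 16-bit immediate is split into imm4 (bits 19-16) and
             imm12 (bits 11-0); the XOR/subtract below sign-extends it
             so that negative REL addends are handled correctly.  */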
8164 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8165 signed_addend = (addend ^ 0x8000) - 0x8000;
8166 }
8167
8168 value += signed_addend;
8169
8170 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8171 value -= (input_section->output_section->vma
8172 + input_section->output_offset + rel->r_offset);
8173
8174 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8175 return bfd_reloc_overflow;
8176
8177 if (sym_flags == STT_ARM_TFUNC)
8178 value |= 1;
8179
8180 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8181 || r_type == R_ARM_MOVT_BREL)
8182 value >>= 16;
8183
8184 insn &= 0xfff0f000;
8185 insn |= value & 0xfff;
8186 insn |= (value & 0xf000) << 4;
8187 bfd_put_32 (input_bfd, insn, hit_data);
8188 }
8189 return bfd_reloc_ok;
8190
8191 case R_ARM_THM_MOVW_ABS_NC:
8192 case R_ARM_THM_MOVT_ABS:
8193 case R_ARM_THM_MOVW_PREL_NC:
8194 case R_ARM_THM_MOVT_PREL:
8195 /* Until we properly support segment-base-relative addressing then
8196 we assume the segment base to be zero, as for the above relocations.
8197 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8198 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8199 as R_ARM_THM_MOVT_ABS. */
8200 case R_ARM_THM_MOVW_BREL_NC:
8201 case R_ARM_THM_MOVW_BREL:
8202 case R_ARM_THM_MOVT_BREL:
8203 {
8204 bfd_vma insn;
8205
8206 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8207 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8208
8209 if (globals->use_rel)
8210 {
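          /* The Thumb-2 MOVW/MOVT immediate is split into imm4
             (bits 19-16), i (bit 26), imm3 (bits 14-12) and imm8
             (bits 7-0) of the combined 32-bit encoding.  */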
8211 addend = ((insn >> 4) & 0xf000)
8212 | ((insn >> 15) & 0x0800)
8213 | ((insn >> 4) & 0x0700)
8214 | (insn & 0x00ff);
8215 signed_addend = (addend ^ 0x8000) - 0x8000;
8216 }
8217
8218 value += signed_addend;
8219
8220 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8221 value -= (input_section->output_section->vma
8222 + input_section->output_offset + rel->r_offset);
8223
8224 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8225 return bfd_reloc_overflow;
8226
8227 if (sym_flags == STT_ARM_TFUNC)
8228 value |= 1;
8229
8230 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8231 || r_type == R_ARM_THM_MOVT_BREL)
8232 value >>= 16;
8233
8234 insn &= 0xfbf08f00;
8235 insn |= (value & 0xf000) << 4;
8236 insn |= (value & 0x0800) << 15;
8237 insn |= (value & 0x0700) << 4;
8238 insn |= (value & 0x00ff);
8239
8240 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8241 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8242 }
8243 return bfd_reloc_ok;
8244
8245 case R_ARM_ALU_PC_G0_NC:
8246 case R_ARM_ALU_PC_G1_NC:
8247 case R_ARM_ALU_PC_G0:
8248 case R_ARM_ALU_PC_G1:
8249 case R_ARM_ALU_PC_G2:
8250 case R_ARM_ALU_SB_G0_NC:
8251 case R_ARM_ALU_SB_G1_NC:
8252 case R_ARM_ALU_SB_G0:
8253 case R_ARM_ALU_SB_G1:
8254 case R_ARM_ALU_SB_G2:
8255 {
8256 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8257 bfd_vma pc = input_section->output_section->vma
8258 + input_section->output_offset + rel->r_offset;
8259 /* sb should be the origin of the *segment* containing the symbol.
8260 It is not clear how to obtain this OS-dependent value, so we
8261 make an arbitrary choice of zero. */
8262 bfd_vma sb = 0;
8263 bfd_vma residual;
8264 bfd_vma g_n;
8265 bfd_signed_vma signed_value;
8266 int group = 0;
8267
8268 /* Determine which group of bits to select. */
8269 switch (r_type)
8270 {
8271 case R_ARM_ALU_PC_G0_NC:
8272 case R_ARM_ALU_PC_G0:
8273 case R_ARM_ALU_SB_G0_NC:
8274 case R_ARM_ALU_SB_G0:
8275 group = 0;
8276 break;
8277
8278 case R_ARM_ALU_PC_G1_NC:
8279 case R_ARM_ALU_PC_G1:
8280 case R_ARM_ALU_SB_G1_NC:
8281 case R_ARM_ALU_SB_G1:
8282 group = 1;
8283 break;
8284
8285 case R_ARM_ALU_PC_G2:
8286 case R_ARM_ALU_SB_G2:
8287 group = 2;
8288 break;
8289
8290 default:
8291 abort ();
8292 }
8293
8294 /* If REL, extract the addend from the insn. If RELA, it will
8295 have already been fetched for us. */
8296 if (globals->use_rel)
8297 {
8298 int negative;
8299 bfd_vma constant = insn & 0xff;
8300 bfd_vma rotation = (insn & 0xf00) >> 8;
8301
8302 if (rotation == 0)
8303 signed_addend = constant;
8304 else
8305 {
8306 /* Compensate for the fact that in the instruction, the
8307 rotation is stored in multiples of 2 bits. */
8308 rotation *= 2;
8309
8310 /* Rotate "constant" right by "rotation" bits. */
8311 signed_addend = (constant >> rotation) |
8312 (constant << (8 * sizeof (bfd_vma) - rotation));
8313 }
8314
8315 /* Determine if the instruction is an ADD or a SUB.
8316 (For REL, this determines the sign of the addend.) */
8317 negative = identify_add_or_sub (insn);
8318 if (negative == 0)
8319 {
8320 (*_bfd_error_handler)
8321 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8322 input_bfd, input_section,
8323 (long) rel->r_offset, howto->name);
8324 return bfd_reloc_overflow;
8325 }
8326
8327 signed_addend *= negative;
8328 }
8329
8330 /* Compute the value (X) to go in the place. */
8331 if (r_type == R_ARM_ALU_PC_G0_NC
8332 || r_type == R_ARM_ALU_PC_G1_NC
8333 || r_type == R_ARM_ALU_PC_G0
8334 || r_type == R_ARM_ALU_PC_G1
8335 || r_type == R_ARM_ALU_PC_G2)
8336 /* PC relative. */
8337 signed_value = value - pc + signed_addend;
8338 else
8339 /* Section base relative. */
8340 signed_value = value - sb + signed_addend;
8341
8342 /* If the target symbol is a Thumb function, then set the
8343 Thumb bit in the address. */
8344 if (sym_flags == STT_ARM_TFUNC)
8345 signed_value |= 1;
8346
8347 /* Calculate the value of the relevant G_n, in encoded
8348 constant-with-rotation format. */
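        /* G_n comes back as an ARM modified immediate: an 8-bit constant
           together with a 4-bit rotation field held in bits 8-11 (the
           rotation applied is twice the field value), so it can be ORed
           directly into the instruction below.  */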
8349 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8350 &residual);
8351
8352 /* Check for overflow if required. */
8353 if ((r_type == R_ARM_ALU_PC_G0
8354 || r_type == R_ARM_ALU_PC_G1
8355 || r_type == R_ARM_ALU_PC_G2
8356 || r_type == R_ARM_ALU_SB_G0
8357 || r_type == R_ARM_ALU_SB_G1
8358 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8359 {
8360 (*_bfd_error_handler)
8361 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8362 input_bfd, input_section,
8363 (long) rel->r_offset, abs (signed_value), howto->name);
8364 return bfd_reloc_overflow;
8365 }
8366
8367 /* Mask out the value and the ADD/SUB part of the opcode; take care
8368 not to destroy the S bit. */
8369 insn &= 0xff1ff000;
8370
8371 /* Set the opcode according to whether the value to go in the
8372 place is negative. */
8373 if (signed_value < 0)
8374 insn |= 1 << 22;
8375 else
8376 insn |= 1 << 23;
8377
8378 /* Encode the offset. */
8379 insn |= g_n;
8380
8381 bfd_put_32 (input_bfd, insn, hit_data);
8382 }
8383 return bfd_reloc_ok;
8384
8385 case R_ARM_LDR_PC_G0:
8386 case R_ARM_LDR_PC_G1:
8387 case R_ARM_LDR_PC_G2:
8388 case R_ARM_LDR_SB_G0:
8389 case R_ARM_LDR_SB_G1:
8390 case R_ARM_LDR_SB_G2:
8391 {
8392 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8393 bfd_vma pc = input_section->output_section->vma
8394 + input_section->output_offset + rel->r_offset;
8395 bfd_vma sb = 0; /* See note above. */
8396 bfd_vma residual;
8397 bfd_signed_vma signed_value;
8398 int group = 0;
8399
8400 /* Determine which groups of bits to calculate. */
8401 switch (r_type)
8402 {
8403 case R_ARM_LDR_PC_G0:
8404 case R_ARM_LDR_SB_G0:
8405 group = 0;
8406 break;
8407
8408 case R_ARM_LDR_PC_G1:
8409 case R_ARM_LDR_SB_G1:
8410 group = 1;
8411 break;
8412
8413 case R_ARM_LDR_PC_G2:
8414 case R_ARM_LDR_SB_G2:
8415 group = 2;
8416 break;
8417
8418 default:
8419 abort ();
8420 }
8421
8422 /* If REL, extract the addend from the insn. If RELA, it will
8423 have already been fetched for us. */
8424 if (globals->use_rel)
8425 {
8426 int negative = (insn & (1 << 23)) ? 1 : -1;
8427 signed_addend = negative * (insn & 0xfff);
8428 }
8429
8430 /* Compute the value (X) to go in the place. */
8431 if (r_type == R_ARM_LDR_PC_G0
8432 || r_type == R_ARM_LDR_PC_G1
8433 || r_type == R_ARM_LDR_PC_G2)
8434 /* PC relative. */
8435 signed_value = value - pc + signed_addend;
8436 else
8437 /* Section base relative. */
8438 signed_value = value - sb + signed_addend;
8439
8440 /* Calculate the value of the relevant G_{n-1} to obtain
8441 the residual at that stage. */
8442 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8443
8444 /* Check for overflow. */
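        /* LDR/STR of a word or unsigned byte takes a 12-bit unsigned
           offset with a separate U (add/subtract) bit, so the residual
           must fit in 12 bits.  */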
8445 if (residual >= 0x1000)
8446 {
8447 (*_bfd_error_handler)
8448 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8449 input_bfd, input_section,
8450 (long) rel->r_offset, abs (signed_value), howto->name);
8451 return bfd_reloc_overflow;
8452 }
8453
8454 /* Mask out the value and U bit. */
8455 insn &= 0xff7ff000;
8456
8457 /* Set the U bit if the value to go in the place is non-negative. */
8458 if (signed_value >= 0)
8459 insn |= 1 << 23;
8460
8461 /* Encode the offset. */
8462 insn |= residual;
8463
8464 bfd_put_32 (input_bfd, insn, hit_data);
8465 }
8466 return bfd_reloc_ok;
8467
8468 case R_ARM_LDRS_PC_G0:
8469 case R_ARM_LDRS_PC_G1:
8470 case R_ARM_LDRS_PC_G2:
8471 case R_ARM_LDRS_SB_G0:
8472 case R_ARM_LDRS_SB_G1:
8473 case R_ARM_LDRS_SB_G2:
8474 {
8475 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8476 bfd_vma pc = input_section->output_section->vma
8477 + input_section->output_offset + rel->r_offset;
8478 bfd_vma sb = 0; /* See note above. */
8479 bfd_vma residual;
8480 bfd_signed_vma signed_value;
8481 int group = 0;
8482
8483 /* Determine which groups of bits to calculate. */
8484 switch (r_type)
8485 {
8486 case R_ARM_LDRS_PC_G0:
8487 case R_ARM_LDRS_SB_G0:
8488 group = 0;
8489 break;
8490
8491 case R_ARM_LDRS_PC_G1:
8492 case R_ARM_LDRS_SB_G1:
8493 group = 1;
8494 break;
8495
8496 case R_ARM_LDRS_PC_G2:
8497 case R_ARM_LDRS_SB_G2:
8498 group = 2;
8499 break;
8500
8501 default:
8502 abort ();
8503 }
8504
8505 /* If REL, extract the addend from the insn. If RELA, it will
8506 have already been fetched for us. */
8507 if (globals->use_rel)
8508 {
8509 int negative = (insn & (1 << 23)) ? 1 : -1;
8510 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8511 }
8512
8513 /* Compute the value (X) to go in the place. */
8514 if (r_type == R_ARM_LDRS_PC_G0
8515 || r_type == R_ARM_LDRS_PC_G1
8516 || r_type == R_ARM_LDRS_PC_G2)
8517 /* PC relative. */
8518 signed_value = value - pc + signed_addend;
8519 else
8520 /* Section base relative. */
8521 signed_value = value - sb + signed_addend;
8522
8523 /* Calculate the value of the relevant G_{n-1} to obtain
8524 the residual at that stage. */
8525 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8526
8527 /* Check for overflow. */
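        /* The halfword/signed-byte/doubleword forms (LDRH, LDRSB, LDRD,
           etc.) take an 8-bit offset split into two nibbles (bits 11-8
           and 3-0), again with a separate U bit, so the residual must
           fit in 8 bits.  */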
8528 if (residual >= 0x100)
8529 {
8530 (*_bfd_error_handler)
8531 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8532 input_bfd, input_section,
8533 (long) rel->r_offset, abs (signed_value), howto->name);
8534 return bfd_reloc_overflow;
8535 }
8536
8537 /* Mask out the value and U bit. */
8538 insn &= 0xff7ff0f0;
8539
8540 /* Set the U bit if the value to go in the place is non-negative. */
8541 if (signed_value >= 0)
8542 insn |= 1 << 23;
8543
8544 /* Encode the offset. */
8545 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8546
8547 bfd_put_32 (input_bfd, insn, hit_data);
8548 }
8549 return bfd_reloc_ok;
8550
8551 case R_ARM_LDC_PC_G0:
8552 case R_ARM_LDC_PC_G1:
8553 case R_ARM_LDC_PC_G2:
8554 case R_ARM_LDC_SB_G0:
8555 case R_ARM_LDC_SB_G1:
8556 case R_ARM_LDC_SB_G2:
8557 {
8558 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8559 bfd_vma pc = input_section->output_section->vma
8560 + input_section->output_offset + rel->r_offset;
8561 bfd_vma sb = 0; /* See note above. */
8562 bfd_vma residual;
8563 bfd_signed_vma signed_value;
8564 int group = 0;
8565
8566 /* Determine which groups of bits to calculate. */
8567 switch (r_type)
8568 {
8569 case R_ARM_LDC_PC_G0:
8570 case R_ARM_LDC_SB_G0:
8571 group = 0;
8572 break;
8573
8574 case R_ARM_LDC_PC_G1:
8575 case R_ARM_LDC_SB_G1:
8576 group = 1;
8577 break;
8578
8579 case R_ARM_LDC_PC_G2:
8580 case R_ARM_LDC_SB_G2:
8581 group = 2;
8582 break;
8583
8584 default:
8585 abort ();
8586 }
8587
8588 /* If REL, extract the addend from the insn. If RELA, it will
8589 have already been fetched for us. */
8590 if (globals->use_rel)
8591 {
8592 int negative = (insn & (1 << 23)) ? 1 : -1;
8593 signed_addend = negative * ((insn & 0xff) << 2);
8594 }
8595
8596 /* Compute the value (X) to go in the place. */
8597 if (r_type == R_ARM_LDC_PC_G0
8598 || r_type == R_ARM_LDC_PC_G1
8599 || r_type == R_ARM_LDC_PC_G2)
8600 /* PC relative. */
8601 signed_value = value - pc + signed_addend;
8602 else
8603 /* Section base relative. */
8604 signed_value = value - sb + signed_addend;
8605
8606 /* Calculate the value of the relevant G_{n-1} to obtain
8607 the residual at that stage. */
8608 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8609
8610 /* Check for overflow. (The absolute value to go in the place must be
8611 divisible by four and, after having been divided by four, must
8612 fit in eight bits.) */
8613 if ((residual & 0x3) != 0 || residual >= 0x400)
8614 {
8615 (*_bfd_error_handler)
8616 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8617 input_bfd, input_section,
8618 (long) rel->r_offset, abs (signed_value), howto->name);
8619 return bfd_reloc_overflow;
8620 }
8621
8622 /* Mask out the value and U bit. */
8623 insn &= 0xff7fff00;
8624
8625 /* Set the U bit if the value to go in the place is non-negative. */
8626 if (signed_value >= 0)
8627 insn |= 1 << 23;
8628
8629 /* Encode the offset. */
8630 insn |= residual >> 2;
8631
8632 bfd_put_32 (input_bfd, insn, hit_data);
8633 }
8634 return bfd_reloc_ok;
8635
8636 default:
8637 return bfd_reloc_notsupported;
8638 }
8639 }
8640
8641 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8642 static void
8643 arm_add_to_rel (bfd * abfd,
8644 bfd_byte * address,
8645 reloc_howto_type * howto,
8646 bfd_signed_vma increment)
8647 {
8648 bfd_signed_vma addend;
8649
8650 if (howto->type == R_ARM_THM_CALL
8651 || howto->type == R_ARM_THM_JUMP24)
8652 {
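      /* Only the two 11-bit halves of the offset are adjusted here,
         i.e. the original Thumb-1 BL encoding; the Thumb-2 S, J1 and
         J2 bits are left untouched.  */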
8653 int upper_insn, lower_insn;
8654 int upper, lower;
8655
8656 upper_insn = bfd_get_16 (abfd, address);
8657 lower_insn = bfd_get_16 (abfd, address + 2);
8658 upper = upper_insn & 0x7ff;
8659 lower = lower_insn & 0x7ff;
8660
8661 addend = (upper << 12) | (lower << 1);
8662 addend += increment;
8663 addend >>= 1;
8664
8665 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8666 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8667
8668 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8669 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8670 }
8671 else
8672 {
8673 bfd_vma contents;
8674
8675 contents = bfd_get_32 (abfd, address);
8676
8677 /* Get the (signed) value from the instruction. */
8678 addend = contents & howto->src_mask;
8679 if (addend & ((howto->src_mask + 1) >> 1))
8680 {
8681 bfd_signed_vma mask;
8682
8683 mask = -1;
8684 mask &= ~ howto->src_mask;
8685 addend |= mask;
8686 }
8687
8688       /* Add in the increment (which is a byte value).  */
8689 switch (howto->type)
8690 {
8691 default:
8692 addend += increment;
8693 break;
8694
8695 case R_ARM_PC24:
8696 case R_ARM_PLT32:
8697 case R_ARM_CALL:
8698 case R_ARM_JUMP24:
8699 addend <<= howto->size;
8700 addend += increment;
8701
8702           /* Should we check for overflow here?  */
8703
8704 /* Drop any undesired bits. */
8705 addend >>= howto->rightshift;
8706 break;
8707 }
8708
8709 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8710
8711 bfd_put_32 (abfd, contents, address);
8712 }
8713 }
8714
8715 #define IS_ARM_TLS_RELOC(R_TYPE) \
8716 ((R_TYPE) == R_ARM_TLS_GD32 \
8717 || (R_TYPE) == R_ARM_TLS_LDO32 \
8718 || (R_TYPE) == R_ARM_TLS_LDM32 \
8719 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8720 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8721 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8722 || (R_TYPE) == R_ARM_TLS_LE32 \
8723 || (R_TYPE) == R_ARM_TLS_IE32)
8724
8725 /* Relocate an ARM ELF section. */
8726
8727 static bfd_boolean
8728 elf32_arm_relocate_section (bfd * output_bfd,
8729 struct bfd_link_info * info,
8730 bfd * input_bfd,
8731 asection * input_section,
8732 bfd_byte * contents,
8733 Elf_Internal_Rela * relocs,
8734 Elf_Internal_Sym * local_syms,
8735 asection ** local_sections)
8736 {
8737 Elf_Internal_Shdr *symtab_hdr;
8738 struct elf_link_hash_entry **sym_hashes;
8739 Elf_Internal_Rela *rel;
8740 Elf_Internal_Rela *relend;
8741 const char *name;
8742 struct elf32_arm_link_hash_table * globals;
8743
8744 globals = elf32_arm_hash_table (info);
8745
8746 symtab_hdr = & elf_symtab_hdr (input_bfd);
8747 sym_hashes = elf_sym_hashes (input_bfd);
8748
8749 rel = relocs;
8750 relend = relocs + input_section->reloc_count;
8751 for (; rel < relend; rel++)
8752 {
8753 int r_type;
8754 reloc_howto_type * howto;
8755 unsigned long r_symndx;
8756 Elf_Internal_Sym * sym;
8757 asection * sec;
8758 struct elf_link_hash_entry * h;
8759 bfd_vma relocation;
8760 bfd_reloc_status_type r;
8761 arelent bfd_reloc;
8762 char sym_type;
8763 bfd_boolean unresolved_reloc = FALSE;
8764 char *error_message = NULL;
8765
8766 r_symndx = ELF32_R_SYM (rel->r_info);
8767 r_type = ELF32_R_TYPE (rel->r_info);
8768 r_type = arm_real_reloc_type (globals, r_type);
8769
8770 if ( r_type == R_ARM_GNU_VTENTRY
8771 || r_type == R_ARM_GNU_VTINHERIT)
8772 continue;
8773
8774 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8775 howto = bfd_reloc.howto;
8776
8777 h = NULL;
8778 sym = NULL;
8779 sec = NULL;
8780
8781 if (r_symndx < symtab_hdr->sh_info)
8782 {
8783 sym = local_syms + r_symndx;
8784 sym_type = ELF32_ST_TYPE (sym->st_info);
8785 sec = local_sections[r_symndx];
8786
8787 /* An object file might have a reference to a local
8788 undefined symbol. This is a daft object file, but we
8789 should at least do something about it. V4BX & NONE
8790 relocations do not use the symbol and are explicitly
8791 allowed to use the undefined symbol, so allow those. */
8792 if (r_type != R_ARM_V4BX
8793 && r_type != R_ARM_NONE
8794 && bfd_is_und_section (sec)
8795 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8796 {
8797 if (!info->callbacks->undefined_symbol
8798 (info, bfd_elf_string_from_elf_section
8799 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8800 input_bfd, input_section,
8801 rel->r_offset, TRUE))
8802 return FALSE;
8803 }
8804
8805 if (globals->use_rel)
8806 {
8807 relocation = (sec->output_section->vma
8808 + sec->output_offset
8809 + sym->st_value);
8810 if (!info->relocatable
8811 && (sec->flags & SEC_MERGE)
8812 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8813 {
8814 asection *msec;
8815 bfd_vma addend, value;
8816
8817 switch (r_type)
8818 {
8819 case R_ARM_MOVW_ABS_NC:
8820 case R_ARM_MOVT_ABS:
8821 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8822 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8823 addend = (addend ^ 0x8000) - 0x8000;
8824 break;
8825
8826 case R_ARM_THM_MOVW_ABS_NC:
8827 case R_ARM_THM_MOVT_ABS:
8828 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8829 << 16;
8830 value |= bfd_get_16 (input_bfd,
8831 contents + rel->r_offset + 2);
8832 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8833 | ((value & 0x04000000) >> 15);
8834 addend = (addend ^ 0x8000) - 0x8000;
8835 break;
8836
8837 default:
8838 if (howto->rightshift
8839 || (howto->src_mask & (howto->src_mask + 1)))
8840 {
8841 (*_bfd_error_handler)
8842 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8843 input_bfd, input_section,
8844 (long) rel->r_offset, howto->name);
8845 return FALSE;
8846 }
8847
8848 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8849
8850 /* Get the (signed) value from the instruction. */
8851 addend = value & howto->src_mask;
8852 if (addend & ((howto->src_mask + 1) >> 1))
8853 {
8854 bfd_signed_vma mask;
8855
8856 mask = -1;
8857 mask &= ~ howto->src_mask;
8858 addend |= mask;
8859 }
8860 break;
8861 }
8862
8863 msec = sec;
8864 addend =
8865 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8866 - relocation;
8867 addend += msec->output_section->vma + msec->output_offset;
8868
8869                   /* Cases here must match those in the preceding
8870 switch statement. */
8871 switch (r_type)
8872 {
8873 case R_ARM_MOVW_ABS_NC:
8874 case R_ARM_MOVT_ABS:
8875 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8876 | (addend & 0xfff);
8877 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8878 break;
8879
8880 case R_ARM_THM_MOVW_ABS_NC:
8881 case R_ARM_THM_MOVT_ABS:
8882 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8883 | (addend & 0xff) | ((addend & 0x0800) << 15);
8884 bfd_put_16 (input_bfd, value >> 16,
8885 contents + rel->r_offset);
8886 bfd_put_16 (input_bfd, value,
8887 contents + rel->r_offset + 2);
8888 break;
8889
8890 default:
8891 value = (value & ~ howto->dst_mask)
8892 | (addend & howto->dst_mask);
8893 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8894 break;
8895 }
8896 }
8897 }
8898 else
8899 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8900 }
8901 else
8902 {
8903 bfd_boolean warned;
8904
8905 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8906 r_symndx, symtab_hdr, sym_hashes,
8907 h, sec, relocation,
8908 unresolved_reloc, warned);
8909
8910 sym_type = h->type;
8911 }
8912
8913 if (sec != NULL && elf_discarded_section (sec))
8914 {
8915 /* For relocs against symbols from removed linkonce sections,
8916 or sections discarded by a linker script, we just want the
8917 section contents zeroed. Avoid any special processing. */
8918 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8919 rel->r_info = 0;
8920 rel->r_addend = 0;
8921 continue;
8922 }
8923
8924 if (info->relocatable)
8925 {
8926 /* This is a relocatable link. We don't have to change
8927 anything, unless the reloc is against a section symbol,
8928 in which case we have to adjust according to where the
8929 section symbol winds up in the output section. */
8930 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8931 {
8932 if (globals->use_rel)
8933 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8934 howto, (bfd_signed_vma) sec->output_offset);
8935 else
8936 rel->r_addend += sec->output_offset;
8937 }
8938 continue;
8939 }
8940
8941 if (h != NULL)
8942 name = h->root.root.string;
8943 else
8944 {
8945 name = (bfd_elf_string_from_elf_section
8946 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8947 if (name == NULL || *name == '\0')
8948 name = bfd_section_name (input_bfd, sec);
8949 }
8950
8951 if (r_symndx != 0
8952 && r_type != R_ARM_NONE
8953 && (h == NULL
8954 || h->root.type == bfd_link_hash_defined
8955 || h->root.type == bfd_link_hash_defweak)
8956 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8957 {
8958 (*_bfd_error_handler)
8959 ((sym_type == STT_TLS
8960 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8961 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8962 input_bfd,
8963 input_section,
8964 (long) rel->r_offset,
8965 howto->name,
8966 name);
8967 }
8968
8969 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8970 input_section, contents, rel,
8971 relocation, info, sec, name,
8972 (h ? ELF_ST_TYPE (h->type) :
8973 ELF_ST_TYPE (sym->st_info)), h,
8974 &unresolved_reloc, &error_message);
8975
8976 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8977 because such sections are not SEC_ALLOC and thus ld.so will
8978 not process them. */
8979 if (unresolved_reloc
8980 && !((input_section->flags & SEC_DEBUGGING) != 0
8981 && h->def_dynamic))
8982 {
8983 (*_bfd_error_handler)
8984 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8985 input_bfd,
8986 input_section,
8987 (long) rel->r_offset,
8988 howto->name,
8989 h->root.root.string);
8990 return FALSE;
8991 }
8992
8993 if (r != bfd_reloc_ok)
8994 {
8995 switch (r)
8996 {
8997 case bfd_reloc_overflow:
8998 /* If the overflowing reloc was to an undefined symbol,
8999 we have already printed one error message and there
9000 is no point complaining again. */
9001 if ((! h ||
9002 h->root.type != bfd_link_hash_undefined)
9003 && (!((*info->callbacks->reloc_overflow)
9004 (info, (h ? &h->root : NULL), name, howto->name,
9005 (bfd_vma) 0, input_bfd, input_section,
9006 rel->r_offset))))
9007 return FALSE;
9008 break;
9009
9010 case bfd_reloc_undefined:
9011 if (!((*info->callbacks->undefined_symbol)
9012 (info, name, input_bfd, input_section,
9013 rel->r_offset, TRUE)))
9014 return FALSE;
9015 break;
9016
9017 case bfd_reloc_outofrange:
9018 error_message = _("out of range");
9019 goto common_error;
9020
9021 case bfd_reloc_notsupported:
9022 error_message = _("unsupported relocation");
9023 goto common_error;
9024
9025 case bfd_reloc_dangerous:
9026 /* error_message should already be set. */
9027 goto common_error;
9028
9029 default:
9030 error_message = _("unknown error");
9031 /* Fall through. */
9032
9033 common_error:
9034 BFD_ASSERT (error_message != NULL);
9035 if (!((*info->callbacks->reloc_dangerous)
9036 (info, error_message, input_bfd, input_section,
9037 rel->r_offset)))
9038 return FALSE;
9039 break;
9040 }
9041 }
9042 }
9043
9044 return TRUE;
9045 }
9046
9047 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
9048 adds the edit to the start of the list. (The list must be built in order of
9049 ascending INDEX: the function's callers are primarily responsible for
9050 maintaining that condition). */
9051
9052 static void
9053 add_unwind_table_edit (arm_unwind_table_edit **head,
9054 arm_unwind_table_edit **tail,
9055 arm_unwind_edit_type type,
9056 asection *linked_section,
9057 unsigned int index)
9058 {
9059 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9060 xmalloc (sizeof (arm_unwind_table_edit));
9061
9062 new_edit->type = type;
9063 new_edit->linked_section = linked_section;
9064 new_edit->index = index;
9065
9066 if (index > 0)
9067 {
9068 new_edit->next = NULL;
9069
9070 if (*tail)
9071 (*tail)->next = new_edit;
9072
9073 (*tail) = new_edit;
9074
9075 if (!*head)
9076 (*head) = new_edit;
9077 }
9078 else
9079 {
9080 new_edit->next = *head;
9081
9082 if (!*tail)
9083 *tail = new_edit;
9084
9085 *head = new_edit;
9086 }
9087 }
9088
9089 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9090
9091 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
9092 static void
9093 adjust_exidx_size (asection *exidx_sec, int adjust)
9094 {
9095 asection *out_sec;
9096
9097 if (!exidx_sec->rawsize)
9098 exidx_sec->rawsize = exidx_sec->size;
9099
9100 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9101 out_sec = exidx_sec->output_section;
9102 /* Adjust size of output section. */
9103   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9104 }
9105
9106 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9107 static void
9108 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9109 {
9110 struct _arm_elf_section_data *exidx_arm_data;
9111
9112 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9113 add_unwind_table_edit (
9114 &exidx_arm_data->u.exidx.unwind_edit_list,
9115 &exidx_arm_data->u.exidx.unwind_edit_tail,
9116 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
9117
9118   adjust_exidx_size (exidx_sec, 8);
9119 }
9120
9121 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9122 made to those tables, such that:
9123
9124 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9125 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9126 codes which have been inlined into the index).
9127
9128 The edits are applied when the tables are written
9129 (in elf32_arm_write_section).
9130 */
9131
9132 bfd_boolean
9133 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9134 unsigned int num_text_sections,
9135 struct bfd_link_info *info)
9136 {
9137 bfd *inp;
9138 unsigned int last_second_word = 0, i;
9139 asection *last_exidx_sec = NULL;
9140 asection *last_text_sec = NULL;
9141 int last_unwind_type = -1;
9142
9143   /* Walk over all EXIDX sections, and create backlinks from the corresponding
9144 text sections. */
9145 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9146 {
9147 asection *sec;
9148
9149 for (sec = inp->sections; sec != NULL; sec = sec->next)
9150 {
9151 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9152 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9153
9154 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9155 continue;
9156
9157 if (elf_sec->linked_to)
9158 {
9159 Elf_Internal_Shdr *linked_hdr
9160 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9161 struct _arm_elf_section_data *linked_sec_arm_data
9162 = get_arm_elf_section_data (linked_hdr->bfd_section);
9163
9164 if (linked_sec_arm_data == NULL)
9165 continue;
9166
9167 /* Link this .ARM.exidx section back from the text section it
9168 describes. */
9169 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9170 }
9171 }
9172 }
9173
9174   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
9175 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9176 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
9177 */
9178
9179 for (i = 0; i < num_text_sections; i++)
9180 {
9181 asection *sec = text_section_order[i];
9182 asection *exidx_sec;
9183 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9184 struct _arm_elf_section_data *exidx_arm_data;
9185 bfd_byte *contents = NULL;
9186 int deleted_exidx_bytes = 0;
9187 bfd_vma j;
9188 arm_unwind_table_edit *unwind_edit_head = NULL;
9189 arm_unwind_table_edit *unwind_edit_tail = NULL;
9190 Elf_Internal_Shdr *hdr;
9191 bfd *ibfd;
9192
9193 if (arm_data == NULL)
9194 continue;
9195
9196 exidx_sec = arm_data->u.text.arm_exidx_sec;
9197 if (exidx_sec == NULL)
9198 {
9199 /* Section has no unwind data. */
9200 if (last_unwind_type == 0 || !last_exidx_sec)
9201 continue;
9202
9203 /* Ignore zero sized sections. */
9204 if (sec->size == 0)
9205 continue;
9206
9207 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9208 last_unwind_type = 0;
9209 continue;
9210 }
9211
9212 /* Skip /DISCARD/ sections. */
9213 if (bfd_is_abs_section (exidx_sec->output_section))
9214 continue;
9215
9216 hdr = &elf_section_data (exidx_sec)->this_hdr;
9217 if (hdr->sh_type != SHT_ARM_EXIDX)
9218 continue;
9219
9220 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9221 if (exidx_arm_data == NULL)
9222 continue;
9223
9224 ibfd = exidx_sec->owner;
9225
9226 if (hdr->contents != NULL)
9227 contents = hdr->contents;
9228 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9229 /* An error? */
9230 continue;
9231
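/* Walk the index table. Each entry is a pair of 32-bit words: the first
   is a prel31 offset to the start of a function, and the second is either
   the EXIDX_CANTUNWIND marker (1), an inlined unwind description (top bit
   set), or a prel31 offset to an entry in the .ARM.extab section. */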
9232 for (j = 0; j < hdr->sh_size; j += 8)
9233 {
9234 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9235 int unwind_type;
9236 int elide = 0;
9237
9238 /* An EXIDX_CANTUNWIND entry. */
9239 if (second_word == 1)
9240 {
9241 if (last_unwind_type == 0)
9242 elide = 1;
9243 unwind_type = 0;
9244 }
9245 /* Inlined unwinding data. Merge if equal to previous. */
9246 else if ((second_word & 0x80000000) != 0)
9247 {
9248 if (last_second_word == second_word && last_unwind_type == 1)
9249 elide = 1;
9250 unwind_type = 1;
9251 last_second_word = second_word;
9252 }
9253 /* Normal table entry. In theory we could merge these too,
9254 but duplicate entries are likely to be much less common. */
9255 else
9256 unwind_type = 2;
9257
9258 if (elide)
9259 {
9260 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9261 DELETE_EXIDX_ENTRY, NULL, j / 8);
9262
9263 deleted_exidx_bytes += 8;
9264 }
9265
9266 last_unwind_type = unwind_type;
9267 }
9268
9269 /* Free contents if we allocated it ourselves. */
9270 if (contents != hdr->contents)
9271 free (contents);
9272
9273 /* Record edits to be applied later (in elf32_arm_write_section). */
9274 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9275 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9276
9277 if (deleted_exidx_bytes > 0)
9278 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9279
9280 last_exidx_sec = exidx_sec;
9281 last_text_sec = sec;
9282 }
9283
9284 /* Add terminating CANTUNWIND entry. */
9285 if (last_exidx_sec && last_unwind_type != 0)
9286 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9287
9288 return TRUE;
9289 }
9290
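/* Write the glue section called NAME, owned by IBFD, into the output bfd
   OBFD. The section is passed through elf32_arm_write_section first; if
   that does not handle the output itself, its contents are copied into
   the corresponding output section here. Missing or excluded sections
   are skipped. */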
9291 static bfd_boolean
9292 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9293 bfd *ibfd, const char *name)
9294 {
9295 asection *sec, *osec;
9296
9297 sec = bfd_get_section_by_name (ibfd, name);
9298 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9299 return TRUE;
9300
9301 osec = sec->output_section;
9302 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9303 return TRUE;
9304
9305 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9306 sec->output_offset, sec->size))
9307 return FALSE;
9308
9309 return TRUE;
9310 }
9311
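/* ARM-specific final link routine. Run the generic ELF final link, then
   write out the ARM/Thumb interworking, VFP11 erratum veneer and BX glue
   sections that were accumulated on the glue-owner bfd. */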
9312 static bfd_boolean
9313 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9314 {
9315 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9316
9317 /* Invoke the regular ELF backend linker to do all the work. */
9318 if (!bfd_elf_final_link (abfd, info))
9319 return FALSE;
9320
9321 /* Write out any glue sections now that we have created all the
9322 stubs. */
9323 if (globals->bfd_of_glue_owner != NULL)
9324 {
9325 if (! elf32_arm_output_glue_section (info, abfd,
9326 globals->bfd_of_glue_owner,
9327 ARM2THUMB_GLUE_SECTION_NAME))
9328 return FALSE;
9329
9330 if (! elf32_arm_output_glue_section (info, abfd,
9331 globals->bfd_of_glue_owner,
9332 THUMB2ARM_GLUE_SECTION_NAME))
9333 return FALSE;
9334
9335 if (! elf32_arm_output_glue_section (info, abfd,
9336 globals->bfd_of_glue_owner,
9337 VFP11_ERRATUM_VENEER_SECTION_NAME))
9338 return FALSE;
9339
9340 if (! elf32_arm_output_glue_section (info, abfd,
9341 globals->bfd_of_glue_owner,
9342 ARM_BX_GLUE_SECTION_NAME))
9343 return FALSE;
9344 }
9345
9346 return TRUE;
9347 }
9348
9349 /* Set the right machine number. */
9350
9351 static bfd_boolean
9352 elf32_arm_object_p (bfd *abfd)
9353 {
9354 unsigned int mach;
9355
9356 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9357
9358 if (mach != bfd_mach_arm_unknown)
9359 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9360
9361 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9362 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9363
9364 else
9365 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9366
9367 return TRUE;
9368 }
9369
9370 /* Function to keep ARM specific flags in the ELF header. */
9371
9372 static bfd_boolean
9373 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9374 {
9375 if (elf_flags_init (abfd)
9376 && elf_elfheader (abfd)->e_flags != flags)
9377 {
9378 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9379 {
9380 if (flags & EF_ARM_INTERWORK)
9381 (*_bfd_error_handler)
9382 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9383 abfd);
9384 else
9385 _bfd_error_handler
9386 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9387 abfd);
9388 }
9389 }
9390 else
9391 {
9392 elf_elfheader (abfd)->e_flags = flags;
9393 elf_flags_init (abfd) = TRUE;
9394 }
9395
9396 return TRUE;
9397 }
9398
9399 /* Copy backend specific data from one object module to another. */
9400
9401 static bfd_boolean
9402 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9403 {
9404 flagword in_flags;
9405 flagword out_flags;
9406
9407 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9408 return TRUE;
9409
9410 in_flags = elf_elfheader (ibfd)->e_flags;
9411 out_flags = elf_elfheader (obfd)->e_flags;
9412
9413 if (elf_flags_init (obfd)
9414 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9415 && in_flags != out_flags)
9416 {
9417 /* Cannot mix APCS26 and APCS32 code. */
9418 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9419 return FALSE;
9420
9421 /* Cannot mix float APCS and non-float APCS code. */
9422 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9423 return FALSE;
9424
9425 /* If the src and dest have different interworking flags
9426 then turn off the interworking bit. */
9427 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9428 {
9429 if (out_flags & EF_ARM_INTERWORK)
9430 _bfd_error_handler
9431 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9432 obfd, ibfd);
9433
9434 in_flags &= ~EF_ARM_INTERWORK;
9435 }
9436
9437 /* Likewise for PIC, though don't warn for this case. */
9438 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9439 in_flags &= ~EF_ARM_PIC;
9440 }
9441
9442 elf_elfheader (obfd)->e_flags = in_flags;
9443 elf_flags_init (obfd) = TRUE;
9444
9445 /* Also copy the EI_OSABI field. */
9446 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9447 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9448
9449 /* Copy object attributes. */
9450 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9451
9452 return TRUE;
9453 }
9454
9455 /* Values for Tag_ABI_PCS_R9_use. */
9456 enum
9457 {
9458 AEABI_R9_V6,
9459 AEABI_R9_SB,
9460 AEABI_R9_TLS,
9461 AEABI_R9_unused
9462 };
9463
9464 /* Values for Tag_ABI_PCS_RW_data. */
9465 enum
9466 {
9467 AEABI_PCS_RW_data_absolute,
9468 AEABI_PCS_RW_data_PCrel,
9469 AEABI_PCS_RW_data_SBrel,
9470 AEABI_PCS_RW_data_unused
9471 };
9472
9473 /* Values for Tag_ABI_enum_size. */
9474 enum
9475 {
9476 AEABI_enum_unused,
9477 AEABI_enum_short,
9478 AEABI_enum_wide,
9479 AEABI_enum_forced_wide
9480 };
9481
9482 /* Determine whether an object attribute tag takes an integer, a
9483 string or both. */
9484
9485 static int
9486 elf32_arm_obj_attrs_arg_type (int tag)
9487 {
9488 if (tag == Tag_compatibility)
9489 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9490 else if (tag == Tag_nodefaults)
9491 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9492 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9493 return ATTR_TYPE_FLAG_STR_VAL;
9494 else if (tag < 32)
9495 return ATTR_TYPE_FLAG_INT_VAL;
9496 else
9497 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9498 }
9499
9500 /* The ABI defines that Tag_conformance should be emitted first, and that
9501 Tag_nodefaults should be second (if either is defined). This sets those
9502 two positions, and bumps up the position of all the remaining tags to
9503 compensate. */
9504 static int
9505 elf32_arm_obj_attrs_order (int num)
9506 {
9507 if (num == 4)
9508 return Tag_conformance;
9509 if (num == 5)
9510 return Tag_nodefaults;
9511 if ((num - 2) < Tag_nodefaults)
9512 return num - 2;
9513 if ((num - 1) < Tag_conformance)
9514 return num - 1;
9515 return num;
9516 }
9517
9518 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9519 Returns -1 if no architecture could be read. */
9520
9521 static int
9522 get_secondary_compatible_arch (bfd *abfd)
9523 {
9524 obj_attribute *attr =
9525 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9526
9527 /* Note: the tag and its argument below are uleb128 values, though
9528 currently-defined values fit in one byte for each. */
9529 if (attr->s
9530 && attr->s[0] == Tag_CPU_arch
9531 && (attr->s[1] & 128) != 128
9532 && attr->s[2] == 0)
9533 return attr->s[1];
9534
9535 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9536 return -1;
9537 }
9538
9539 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9540 The tag is removed if ARCH is -1. */
9541
9542 static void
9543 set_secondary_compatible_arch (bfd *abfd, int arch)
9544 {
9545 obj_attribute *attr =
9546 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9547
9548 if (arch == -1)
9549 {
9550 attr->s = NULL;
9551 return;
9552 }
9553
9554 /* Note: the tag and its argument below are uleb128 values, though
9555 currently-defined values fit in one byte for each. */
9556 if (!attr->s)
9557 attr->s = (char *) bfd_alloc (abfd, 3);
9558 attr->s[0] = Tag_CPU_arch;
9559 attr->s[1] = arch;
9560 attr->s[2] = '\0';
9561 }
9562
9563 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9564 into account. */
9565
9566 static int
9567 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9568 int newtag, int secondary_compat)
9569 {
9570 #define T(X) TAG_CPU_ARCH_##X
9571 int tagl, tagh, result;
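/* The tables below give the result of merging two Tag_CPU_arch values:
   comb[tagh - T(V6T2)] is selected by the higher of the two tags and then
   indexed by the lower one. An entry of -1 means the two architectures
   have no common superset. */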
9572 const int v6t2[] =
9573 {
9574 T(V6T2), /* PRE_V4. */
9575 T(V6T2), /* V4. */
9576 T(V6T2), /* V4T. */
9577 T(V6T2), /* V5T. */
9578 T(V6T2), /* V5TE. */
9579 T(V6T2), /* V5TEJ. */
9580 T(V6T2), /* V6. */
9581 T(V7), /* V6KZ. */
9582 T(V6T2) /* V6T2. */
9583 };
9584 const int v6k[] =
9585 {
9586 T(V6K), /* PRE_V4. */
9587 T(V6K), /* V4. */
9588 T(V6K), /* V4T. */
9589 T(V6K), /* V5T. */
9590 T(V6K), /* V5TE. */
9591 T(V6K), /* V5TEJ. */
9592 T(V6K), /* V6. */
9593 T(V6KZ), /* V6KZ. */
9594 T(V7), /* V6T2. */
9595 T(V6K) /* V6K. */
9596 };
9597 const int v7[] =
9598 {
9599 T(V7), /* PRE_V4. */
9600 T(V7), /* V4. */
9601 T(V7), /* V4T. */
9602 T(V7), /* V5T. */
9603 T(V7), /* V5TE. */
9604 T(V7), /* V5TEJ. */
9605 T(V7), /* V6. */
9606 T(V7), /* V6KZ. */
9607 T(V7), /* V6T2. */
9608 T(V7), /* V6K. */
9609 T(V7) /* V7. */
9610 };
9611 const int v6_m[] =
9612 {
9613 -1, /* PRE_V4. */
9614 -1, /* V4. */
9615 T(V6K), /* V4T. */
9616 T(V6K), /* V5T. */
9617 T(V6K), /* V5TE. */
9618 T(V6K), /* V5TEJ. */
9619 T(V6K), /* V6. */
9620 T(V6KZ), /* V6KZ. */
9621 T(V7), /* V6T2. */
9622 T(V6K), /* V6K. */
9623 T(V7), /* V7. */
9624 T(V6_M) /* V6_M. */
9625 };
9626 const int v6s_m[] =
9627 {
9628 -1, /* PRE_V4. */
9629 -1, /* V4. */
9630 T(V6K), /* V4T. */
9631 T(V6K), /* V5T. */
9632 T(V6K), /* V5TE. */
9633 T(V6K), /* V5TEJ. */
9634 T(V6K), /* V6. */
9635 T(V6KZ), /* V6KZ. */
9636 T(V7), /* V6T2. */
9637 T(V6K), /* V6K. */
9638 T(V7), /* V7. */
9639 T(V6S_M), /* V6_M. */
9640 T(V6S_M) /* V6S_M. */
9641 };
9642 const int v7e_m[] =
9643 {
9644 -1, /* PRE_V4. */
9645 -1, /* V4. */
9646 T(V7E_M), /* V4T. */
9647 T(V7E_M), /* V5T. */
9648 T(V7E_M), /* V5TE. */
9649 T(V7E_M), /* V5TEJ. */
9650 T(V7E_M), /* V6. */
9651 T(V7E_M), /* V6KZ. */
9652 T(V7E_M), /* V6T2. */
9653 T(V7E_M), /* V6K. */
9654 T(V7E_M), /* V7. */
9655 T(V7E_M), /* V6_M. */
9656 T(V7E_M), /* V6S_M. */
9657 T(V7E_M) /* V7E_M. */
9658 };
9659 const int v4t_plus_v6_m[] =
9660 {
9661 -1, /* PRE_V4. */
9662 -1, /* V4. */
9663 T(V4T), /* V4T. */
9664 T(V5T), /* V5T. */
9665 T(V5TE), /* V5TE. */
9666 T(V5TEJ), /* V5TEJ. */
9667 T(V6), /* V6. */
9668 T(V6KZ), /* V6KZ. */
9669 T(V6T2), /* V6T2. */
9670 T(V6K), /* V6K. */
9671 T(V7), /* V7. */
9672 T(V6_M), /* V6_M. */
9673 T(V6S_M), /* V6S_M. */
9674 T(V7E_M), /* V7E_M. */
9675 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9676 };
9677 const int *comb[] =
9678 {
9679 v6t2,
9680 v6k,
9681 v7,
9682 v6_m,
9683 v6s_m,
9684 v7e_m,
9685 /* Pseudo-architecture. */
9686 v4t_plus_v6_m
9687 };
9688
9689 /* Check we've not got a higher architecture than we know about. */
9690
9691 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9692 {
9693 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9694 return -1;
9695 }
9696
9697 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9698
9699 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9700 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9701 oldtag = T(V4T_PLUS_V6_M);
9702
9703 /* And override the new tag if we have a Tag_also_compatible_with on the
9704 input. */
9705
9706 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9707 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9708 newtag = T(V4T_PLUS_V6_M);
9709
9710 tagl = (oldtag < newtag) ? oldtag : newtag;
9711 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9712
9713 /* Architectures before V6KZ add features monotonically. */
9714 if (tagh <= TAG_CPU_ARCH_V6KZ)
9715 return result;
9716
9717 result = comb[tagh - T(V6T2)][tagl];
9718
9719 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9720 as the canonical version. */
9721 if (result == T(V4T_PLUS_V6_M))
9722 {
9723 result = T(V4T);
9724 *secondary_compat_out = T(V6_M);
9725 }
9726 else
9727 *secondary_compat_out = -1;
9728
9729 if (result == -1)
9730 {
9731 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9732 ibfd, oldtag, newtag);
9733 return -1;
9734 }
9735
9736 return result;
9737 #undef T
9738 }
9739
9740 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9741 are conflicting attributes. */
9742
9743 static bfd_boolean
9744 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9745 {
9746 obj_attribute *in_attr;
9747 obj_attribute *out_attr;
9748 obj_attribute_list *in_list;
9749 obj_attribute_list *out_list;
9750 obj_attribute_list **out_listp;
9751 /* Some tags have 0 = don't care, 1 = strong requirement,
9752 2 = weak requirement. */
9753 static const int order_021[3] = {0, 2, 1};
9754 int i;
9755 bfd_boolean result = TRUE;
9756
9757 /* Skip the linker stubs file. This preserves previous behavior
9758 of accepting unknown attributes in the first input file - but
9759 is that a bug? */
9760 if (ibfd->flags & BFD_LINKER_CREATED)
9761 return TRUE;
9762
9763 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9764 {
9765 /* This is the first object. Copy the attributes. */
9766 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9767
9768 /* Use the Tag_null value to indicate the attributes have been
9769 initialized. */
9770 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9771
9772 return TRUE;
9773 }
9774
9775 in_attr = elf_known_obj_attributes_proc (ibfd);
9776 out_attr = elf_known_obj_attributes_proc (obfd);
9777 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9778 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9779 {
9780 /* Ignore mismatches if the object doesn't use floating point. */
9781 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9782 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9783 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9784 {
9785 _bfd_error_handler
9786 (_("error: %B uses VFP register arguments, %B does not"),
9787 ibfd, obfd);
9788 result = FALSE;
9789 }
9790 }
9791
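/* Index 0 is only used above to mark the attribute set as initialized,
   so start merging at the first real per-file tag, Tag_CPU_raw_name (4). */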
9792 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9793 {
9794 /* Merge this attribute with existing attributes. */
9795 switch (i)
9796 {
9797 case Tag_CPU_raw_name:
9798 case Tag_CPU_name:
9799 /* These are merged after Tag_CPU_arch. */
9800 break;
9801
9802 case Tag_ABI_optimization_goals:
9803 case Tag_ABI_FP_optimization_goals:
9804 /* Use the first value seen. */
9805 break;
9806
9807 case Tag_CPU_arch:
9808 {
9809 int secondary_compat = -1, secondary_compat_out = -1;
9810 unsigned int saved_out_attr = out_attr[i].i;
9811 static const char *name_table[] = {
9812 /* These aren't real CPU names, but we can't guess
9813 that from the architecture version alone. */
9814 "Pre v4",
9815 "ARM v4",
9816 "ARM v4T",
9817 "ARM v5T",
9818 "ARM v5TE",
9819 "ARM v5TEJ",
9820 "ARM v6",
9821 "ARM v6KZ",
9822 "ARM v6T2",
9823 "ARM v6K",
9824 "ARM v7",
9825 "ARM v6-M",
9826 "ARM v6S-M"
9827 };
9828
9829 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9830 secondary_compat = get_secondary_compatible_arch (ibfd);
9831 secondary_compat_out = get_secondary_compatible_arch (obfd);
9832 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9833 &secondary_compat_out,
9834 in_attr[i].i,
9835 secondary_compat);
9836 set_secondary_compatible_arch (obfd, secondary_compat_out);
9837
9838 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9839 if (out_attr[i].i == saved_out_attr)
9840 ; /* Leave the names alone. */
9841 else if (out_attr[i].i == in_attr[i].i)
9842 {
9843 /* The output architecture has been changed to match the
9844 input architecture. Use the input names. */
9845 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9846 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9847 : NULL;
9848 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9849 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9850 : NULL;
9851 }
9852 else
9853 {
9854 out_attr[Tag_CPU_name].s = NULL;
9855 out_attr[Tag_CPU_raw_name].s = NULL;
9856 }
9857
9858 /* If we still don't have a value for Tag_CPU_name,
9859 make one up now. Tag_CPU_raw_name remains blank. */
9860 if (out_attr[Tag_CPU_name].s == NULL
9861 && out_attr[i].i < ARRAY_SIZE (name_table))
9862 out_attr[Tag_CPU_name].s =
9863 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9864 }
9865 break;
9866
9867 case Tag_ARM_ISA_use:
9868 case Tag_THUMB_ISA_use:
9869 case Tag_WMMX_arch:
9870 case Tag_Advanced_SIMD_arch:
9871 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9872 case Tag_ABI_FP_rounding:
9873 case Tag_ABI_FP_exceptions:
9874 case Tag_ABI_FP_user_exceptions:
9875 case Tag_ABI_FP_number_model:
9876 case Tag_VFP_HP_extension:
9877 case Tag_CPU_unaligned_access:
9878 case Tag_T2EE_use:
9879 case Tag_Virtualization_use:
9880 case Tag_MPextension_use:
9881 /* Use the largest value specified. */
9882 if (in_attr[i].i > out_attr[i].i)
9883 out_attr[i].i = in_attr[i].i;
9884 break;
9885
9886 case Tag_ABI_align8_preserved:
9887 case Tag_ABI_PCS_RO_data:
9888 /* Use the smallest value specified. */
9889 if (in_attr[i].i < out_attr[i].i)
9890 out_attr[i].i = in_attr[i].i;
9891 break;
9892
9893 case Tag_ABI_align8_needed:
9894 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9895 && (in_attr[Tag_ABI_align8_preserved].i == 0
9896 || out_attr[Tag_ABI_align8_preserved].i == 0))
9897 {
9898 /* This error message should be enabled once all non-conformant
9899 binaries in the toolchain have had the attributes set
9900 properly.
9901 _bfd_error_handler
9902 (_("error: %B: 8-byte data alignment conflicts with %B"),
9903 obfd, ibfd);
9904 result = FALSE; */
9905 }
9906 /* Fall through. */
9907 case Tag_ABI_FP_denormal:
9908 case Tag_ABI_PCS_GOT_use:
9909 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9910 value if greater than 2 (for future-proofing). */
9911 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9912 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9913 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9914 out_attr[i].i = in_attr[i].i;
9915 break;
9916
9917
9918 case Tag_CPU_arch_profile:
9919 if (out_attr[i].i != in_attr[i].i)
9920 {
9921 /* 0 will merge with anything.
9922 'A' and 'S' merge to 'A'.
9923 'R' and 'S' merge to 'R'.
9924 'M' and 'A|R|S' is an error. */
9925 if (out_attr[i].i == 0
9926 || (out_attr[i].i == 'S'
9927 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9928 out_attr[i].i = in_attr[i].i;
9929 else if (in_attr[i].i == 0
9930 || (in_attr[i].i == 'S'
9931 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9932 ; /* Do nothing. */
9933 else
9934 {
9935 _bfd_error_handler
9936 (_("error: %B: Conflicting architecture profiles %c/%c"),
9937 ibfd,
9938 in_attr[i].i ? in_attr[i].i : '0',
9939 out_attr[i].i ? out_attr[i].i : '0');
9940 result = FALSE;
9941 }
9942 }
9943 break;
9944 case Tag_VFP_arch:
9945 {
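/* For each defined Tag_VFP_arch value, the VFP ISA version it denotes
   and the number of D registers it provides; merging takes the superset
   of both and maps it back to a tag value below. */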
9946 static const struct
9947 {
9948 int ver;
9949 int regs;
9950 } vfp_versions[7] =
9951 {
9952 {0, 0},
9953 {1, 16},
9954 {2, 16},
9955 {3, 32},
9956 {3, 16},
9957 {4, 32},
9958 {4, 16}
9959 };
9960 int ver;
9961 int regs;
9962 int newval;
9963
9964 /* Values greater than 6 aren't defined, so just pick the
9965 biggest. */
9966 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
9967 {
9968 out_attr[i] = in_attr[i];
9969 break;
9970 }
9971 /* The output uses the superset of input features
9972 (ISA version) and registers. */
9973 ver = vfp_versions[in_attr[i].i].ver;
9974 if (ver < vfp_versions[out_attr[i].i].ver)
9975 ver = vfp_versions[out_attr[i].i].ver;
9976 regs = vfp_versions[in_attr[i].i].regs;
9977 if (regs < vfp_versions[out_attr[i].i].regs)
9978 regs = vfp_versions[out_attr[i].i].regs;
9979 /* This assumes all possible supersets are also valid
9980 options. */
9981 for (newval = 6; newval > 0; newval--)
9982 {
9983 if (regs == vfp_versions[newval].regs
9984 && ver == vfp_versions[newval].ver)
9985 break;
9986 }
9987 out_attr[i].i = newval;
9988 }
9989 break;
9990 case Tag_PCS_config:
9991 if (out_attr[i].i == 0)
9992 out_attr[i].i = in_attr[i].i;
9993 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9994 {
9995 /* It's sometimes ok to mix different configs, so this is only
9996 a warning. */
9997 _bfd_error_handler
9998 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9999 }
10000 break;
10001 case Tag_ABI_PCS_R9_use:
10002 if (in_attr[i].i != out_attr[i].i
10003 && out_attr[i].i != AEABI_R9_unused
10004 && in_attr[i].i != AEABI_R9_unused)
10005 {
10006 _bfd_error_handler
10007 (_("error: %B: Conflicting use of R9"), ibfd);
10008 result = FALSE;
10009 }
10010 if (out_attr[i].i == AEABI_R9_unused)
10011 out_attr[i].i = in_attr[i].i;
10012 break;
10013 case Tag_ABI_PCS_RW_data:
10014 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10015 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10016 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10017 {
10018 _bfd_error_handler
10019 (_("error: %B: SB relative addressing conflicts with use of R9"),
10020 ibfd);
10021 result = FALSE;
10022 }
10023 /* Use the smallest value specified. */
10024 if (in_attr[i].i < out_attr[i].i)
10025 out_attr[i].i = in_attr[i].i;
10026 break;
10027 case Tag_ABI_PCS_wchar_t:
10028 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10029 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10030 {
10031 _bfd_error_handler
10032 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10033 ibfd, in_attr[i].i, out_attr[i].i);
10034 }
10035 else if (in_attr[i].i && !out_attr[i].i)
10036 out_attr[i].i = in_attr[i].i;
10037 break;
10038 case Tag_ABI_enum_size:
10039 if (in_attr[i].i != AEABI_enum_unused)
10040 {
10041 if (out_attr[i].i == AEABI_enum_unused
10042 || out_attr[i].i == AEABI_enum_forced_wide)
10043 {
10044 /* The existing object is compatible with anything.
10045 Use whatever requirements the new object has. */
10046 out_attr[i].i = in_attr[i].i;
10047 }
10048 else if (in_attr[i].i != AEABI_enum_forced_wide
10049 && out_attr[i].i != in_attr[i].i
10050 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10051 {
10052 static const char *aeabi_enum_names[] =
10053 { "", "variable-size", "32-bit", "" };
10054 const char *in_name =
10055 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10056 ? aeabi_enum_names[in_attr[i].i]
10057 : "<unknown>";
10058 const char *out_name =
10059 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10060 ? aeabi_enum_names[out_attr[i].i]
10061 : "<unknown>";
10062 _bfd_error_handler
10063 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10064 ibfd, in_name, out_name);
10065 }
10066 }
10067 break;
10068 case Tag_ABI_VFP_args:
10069 /* Already done. */
10070 break;
10071 case Tag_ABI_WMMX_args:
10072 if (in_attr[i].i != out_attr[i].i)
10073 {
10074 _bfd_error_handler
10075 (_("error: %B uses iWMMXt register arguments, %B does not"),
10076 ibfd, obfd);
10077 result = FALSE;
10078 }
10079 break;
10080 case Tag_compatibility:
10081 /* Merged in target-independent code. */
10082 break;
10083 case Tag_ABI_HardFP_use:
10084 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10085 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10086 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10087 out_attr[i].i = 3;
10088 else if (in_attr[i].i > out_attr[i].i)
10089 out_attr[i].i = in_attr[i].i;
10090 break;
10091 case Tag_ABI_FP_16bit_format:
10092 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10093 {
10094 if (in_attr[i].i != out_attr[i].i)
10095 {
10096 _bfd_error_handler
10097 (_("error: fp16 format mismatch between %B and %B"),
10098 ibfd, obfd);
10099 result = FALSE;
10100 }
10101 }
10102 if (in_attr[i].i != 0)
10103 out_attr[i].i = in_attr[i].i;
10104 break;
10105
10106 case Tag_nodefaults:
10107 /* This tag is set if it exists, but the value is unused (and is
10108 typically zero). We don't actually need to do anything here -
10109 the merge happens automatically when the type flags are merged
10110 below. */
10111 break;
10112 case Tag_also_compatible_with:
10113 /* Already done in Tag_CPU_arch. */
10114 break;
10115 case Tag_conformance:
10116 /* Keep the attribute if it matches. Throw it away otherwise.
10117 No attribute means no claim to conform. */
10118 if (!in_attr[i].s || !out_attr[i].s
10119 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10120 out_attr[i].s = NULL;
10121 break;
10122
10123 default:
10124 {
10125 bfd *err_bfd = NULL;
10126
10127 /* The "known_obj_attributes" table does contain some undefined
10128 attributes. Ensure that they are unused. */
10129 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10130 err_bfd = obfd;
10131 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10132 err_bfd = ibfd;
10133
10134 if (err_bfd != NULL)
10135 {
10136 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10137 if ((i & 127) < 64)
10138 {
10139 _bfd_error_handler
10140 (_("%B: Unknown mandatory EABI object attribute %d"),
10141 err_bfd, i);
10142 bfd_set_error (bfd_error_bad_value);
10143 result = FALSE;
10144 }
10145 else
10146 {
10147 _bfd_error_handler
10148 (_("Warning: %B: Unknown EABI object attribute %d"),
10149 err_bfd, i);
10150 }
10151 }
10152
10153 /* Only pass on attributes that match in both inputs. */
10154 if (in_attr[i].i != out_attr[i].i
10155 || in_attr[i].s != out_attr[i].s
10156 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10157 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10158 {
10159 out_attr[i].i = 0;
10160 out_attr[i].s = NULL;
10161 }
10162 }
10163 }
10164
10165 /* If out_attr was copied from in_attr then it won't have a type yet. */
10166 if (in_attr[i].type && !out_attr[i].type)
10167 out_attr[i].type = in_attr[i].type;
10168 }
10169
10170 /* Merge Tag_compatibility attributes and any common GNU ones. */
10171 _bfd_elf_merge_object_attributes (ibfd, obfd);
10172
10173 /* Check for any attributes not known on ARM. */
10174 in_list = elf_other_obj_attributes_proc (ibfd);
10175 out_listp = &elf_other_obj_attributes_proc (obfd);
10176 out_list = *out_listp;
10177
10178 for (; in_list || out_list; )
10179 {
10180 bfd *err_bfd = NULL;
10181 int err_tag = 0;
10182
10183 /* The tags for each list are in numerical order. */
10184 /* If the tags are equal, then merge. */
10185 if (out_list && (!in_list || in_list->tag > out_list->tag))
10186 {
10187 /* This attribute only exists in obfd. We can't merge, and we don't
10188 know what the tag means, so delete it. */
10189 err_bfd = obfd;
10190 err_tag = out_list->tag;
10191 *out_listp = out_list->next;
10192 out_list = *out_listp;
10193 }
10194 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10195 {
10196 /* This attribute only exists in ibfd. We can't merge, and we don't
10197 know what the tag means, so ignore it. */
10198 err_bfd = ibfd;
10199 err_tag = in_list->tag;
10200 in_list = in_list->next;
10201 }
10202 else /* The tags are equal. */
10203 {
10204 /* At present, all attributes in the list are unknown, and
10205 therefore can't be merged meaningfully. */
10206 err_bfd = obfd;
10207 err_tag = out_list->tag;
10208
10209 /* Only pass on attributes that match in both inputs. */
10210 if (in_list->attr.i != out_list->attr.i
10211 || in_list->attr.s != out_list->attr.s
10212 || (in_list->attr.s && out_list->attr.s
10213 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10214 {
10215 /* No match. Delete the attribute. */
10216 *out_listp = out_list->next;
10217 out_list = *out_listp;
10218 }
10219 else
10220 {
10221 /* Matched. Keep the attribute and move to the next. */
10222 out_list = out_list->next;
10223 in_list = in_list->next;
10224 }
10225 }
10226
10227 if (err_bfd)
10228 {
10229 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10230 if ((err_tag & 127) < 64)
10231 {
10232 _bfd_error_handler
10233 (_("%B: Unknown mandatory EABI object attribute %d"),
10234 err_bfd, err_tag);
10235 bfd_set_error (bfd_error_bad_value);
10236 result = FALSE;
10237 }
10238 else
10239 {
10240 _bfd_error_handler
10241 (_("Warning: %B: Unknown EABI object attribute %d"),
10242 err_bfd, err_tag);
10243 }
10244 }
10245 }
10246 return result;
10247 }
10248
10249
10250 /* Return TRUE if the two EABI versions are compatible. */
10251
10252 static bfd_boolean
10253 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10254 {
10255 /* v4 and v5 are the same spec before and after it was released,
10256 so allow mixing them. */
10257 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10258 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10259 return TRUE;
10260
10261 return (iver == over);
10262 }
10263
10264 /* Merge backend specific data from an object file to the output
10265 object file when linking. */
10266
10267 static bfd_boolean
10268 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10269
10270 /* Display the flags field. */
10271
10272 static bfd_boolean
10273 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10274 {
10275 FILE * file = (FILE *) ptr;
10276 unsigned long flags;
10277
10278 BFD_ASSERT (abfd != NULL && ptr != NULL);
10279
10280 /* Print normal ELF private data. */
10281 _bfd_elf_print_private_bfd_data (abfd, ptr);
10282
10283 flags = elf_elfheader (abfd)->e_flags;
10284 /* Ignore init flag - it may not be set, despite the flags field
10285 containing valid data. */
10286
10287 /* xgettext:c-format */
10288 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10289
10290 switch (EF_ARM_EABI_VERSION (flags))
10291 {
10292 case EF_ARM_EABI_UNKNOWN:
10293 /* The following flag bits are GNU extensions and not part of the
10294 official ARM ELF extended ABI. Hence they are only decoded if
10295 the EABI version is not set. */
10296 if (flags & EF_ARM_INTERWORK)
10297 fprintf (file, _(" [interworking enabled]"));
10298
10299 if (flags & EF_ARM_APCS_26)
10300 fprintf (file, " [APCS-26]");
10301 else
10302 fprintf (file, " [APCS-32]");
10303
10304 if (flags & EF_ARM_VFP_FLOAT)
10305 fprintf (file, _(" [VFP float format]"));
10306 else if (flags & EF_ARM_MAVERICK_FLOAT)
10307 fprintf (file, _(" [Maverick float format]"));
10308 else
10309 fprintf (file, _(" [FPA float format]"));
10310
10311 if (flags & EF_ARM_APCS_FLOAT)
10312 fprintf (file, _(" [floats passed in float registers]"));
10313
10314 if (flags & EF_ARM_PIC)
10315 fprintf (file, _(" [position independent]"));
10316
10317 if (flags & EF_ARM_NEW_ABI)
10318 fprintf (file, _(" [new ABI]"));
10319
10320 if (flags & EF_ARM_OLD_ABI)
10321 fprintf (file, _(" [old ABI]"));
10322
10323 if (flags & EF_ARM_SOFT_FLOAT)
10324 fprintf (file, _(" [software FP]"));
10325
10326 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10327 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10328 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10329 | EF_ARM_MAVERICK_FLOAT);
10330 break;
10331
10332 case EF_ARM_EABI_VER1:
10333 fprintf (file, _(" [Version1 EABI]"));
10334
10335 if (flags & EF_ARM_SYMSARESORTED)
10336 fprintf (file, _(" [sorted symbol table]"));
10337 else
10338 fprintf (file, _(" [unsorted symbol table]"));
10339
10340 flags &= ~ EF_ARM_SYMSARESORTED;
10341 break;
10342
10343 case EF_ARM_EABI_VER2:
10344 fprintf (file, _(" [Version2 EABI]"));
10345
10346 if (flags & EF_ARM_SYMSARESORTED)
10347 fprintf (file, _(" [sorted symbol table]"));
10348 else
10349 fprintf (file, _(" [unsorted symbol table]"));
10350
10351 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10352 fprintf (file, _(" [dynamic symbols use segment index]"));
10353
10354 if (flags & EF_ARM_MAPSYMSFIRST)
10355 fprintf (file, _(" [mapping symbols precede others]"));
10356
10357 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10358 | EF_ARM_MAPSYMSFIRST);
10359 break;
10360
10361 case EF_ARM_EABI_VER3:
10362 fprintf (file, _(" [Version3 EABI]"));
10363 break;
10364
10365 case EF_ARM_EABI_VER4:
10366 fprintf (file, _(" [Version4 EABI]"));
10367 goto eabi;
10368
10369 case EF_ARM_EABI_VER5:
10370 fprintf (file, _(" [Version5 EABI]"));
10371 eabi:
10372 if (flags & EF_ARM_BE8)
10373 fprintf (file, _(" [BE8]"));
10374
10375 if (flags & EF_ARM_LE8)
10376 fprintf (file, _(" [LE8]"));
10377
10378 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10379 break;
10380
10381 default:
10382 fprintf (file, _(" <EABI version unrecognised>"));
10383 break;
10384 }
10385
10386 flags &= ~ EF_ARM_EABIMASK;
10387
10388 if (flags & EF_ARM_RELEXEC)
10389 fprintf (file, _(" [relocatable executable]"));
10390
10391 if (flags & EF_ARM_HASENTRY)
10392 fprintf (file, _(" [has entry point]"));
10393
10394 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10395
10396 if (flags)
10397 fprintf (file, _("<Unrecognised flag bits set>"));
10398
10399 fputc ('\n', file);
10400
10401 return TRUE;
10402 }
10403
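/* Return the symbol type to report for ELF_SYM. ARM-specific types
   (Thumb functions, and STT_ARM_16BIT on non-data symbols) are passed
   through; everything else uses the generic TYPE. */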
10404 static int
10405 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10406 {
10407 switch (ELF_ST_TYPE (elf_sym->st_info))
10408 {
10409 case STT_ARM_TFUNC:
10410 return ELF_ST_TYPE (elf_sym->st_info);
10411
10412 case STT_ARM_16BIT:
10413 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10414 This allows us to distinguish between data used by Thumb instructions
10415 and non-data (which is probably code) inside Thumb regions of an
10416 executable. */
10417 if (type != STT_OBJECT && type != STT_TLS)
10418 return ELF_ST_TYPE (elf_sym->st_info);
10419 break;
10420
10421 default:
10422 break;
10423 }
10424
10425 return type;
10426 }
10427
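/* Return the section that should be marked against garbage collection
   for a given relocation. Vtable relocations do not keep their target
   sections alive. */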
10428 static asection *
10429 elf32_arm_gc_mark_hook (asection *sec,
10430 struct bfd_link_info *info,
10431 Elf_Internal_Rela *rel,
10432 struct elf_link_hash_entry *h,
10433 Elf_Internal_Sym *sym)
10434 {
10435 if (h != NULL)
10436 switch (ELF32_R_TYPE (rel->r_info))
10437 {
10438 case R_ARM_GNU_VTINHERIT:
10439 case R_ARM_GNU_VTENTRY:
10440 return NULL;
10441 }
10442
10443 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10444 }
10445
10446 /* Update the got entry reference counts for the section being removed. */
10447
10448 static bfd_boolean
10449 elf32_arm_gc_sweep_hook (bfd * abfd,
10450 struct bfd_link_info * info,
10451 asection * sec,
10452 const Elf_Internal_Rela * relocs)
10453 {
10454 Elf_Internal_Shdr *symtab_hdr;
10455 struct elf_link_hash_entry **sym_hashes;
10456 bfd_signed_vma *local_got_refcounts;
10457 const Elf_Internal_Rela *rel, *relend;
10458 struct elf32_arm_link_hash_table * globals;
10459
10460 if (info->relocatable)
10461 return TRUE;
10462
10463 globals = elf32_arm_hash_table (info);
10464
10465 elf_section_data (sec)->local_dynrel = NULL;
10466
10467 symtab_hdr = & elf_symtab_hdr (abfd);
10468 sym_hashes = elf_sym_hashes (abfd);
10469 local_got_refcounts = elf_local_got_refcounts (abfd);
10470
10471 check_use_blx (globals);
10472
10473 relend = relocs + sec->reloc_count;
10474 for (rel = relocs; rel < relend; rel++)
10475 {
10476 unsigned long r_symndx;
10477 struct elf_link_hash_entry *h = NULL;
10478 int r_type;
10479
10480 r_symndx = ELF32_R_SYM (rel->r_info);
10481 if (r_symndx >= symtab_hdr->sh_info)
10482 {
10483 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10484 while (h->root.type == bfd_link_hash_indirect
10485 || h->root.type == bfd_link_hash_warning)
10486 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10487 }
10488
10489 r_type = ELF32_R_TYPE (rel->r_info);
10490 r_type = arm_real_reloc_type (globals, r_type);
10491 switch (r_type)
10492 {
10493 case R_ARM_GOT32:
10494 case R_ARM_GOT_PREL:
10495 case R_ARM_TLS_GD32:
10496 case R_ARM_TLS_IE32:
10497 if (h != NULL)
10498 {
10499 if (h->got.refcount > 0)
10500 h->got.refcount -= 1;
10501 }
10502 else if (local_got_refcounts != NULL)
10503 {
10504 if (local_got_refcounts[r_symndx] > 0)
10505 local_got_refcounts[r_symndx] -= 1;
10506 }
10507 break;
10508
10509 case R_ARM_TLS_LDM32:
10510 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10511 break;
10512
10513 case R_ARM_ABS32:
10514 case R_ARM_ABS32_NOI:
10515 case R_ARM_REL32:
10516 case R_ARM_REL32_NOI:
10517 case R_ARM_PC24:
10518 case R_ARM_PLT32:
10519 case R_ARM_CALL:
10520 case R_ARM_JUMP24:
10521 case R_ARM_PREL31:
10522 case R_ARM_THM_CALL:
10523 case R_ARM_THM_JUMP24:
10524 case R_ARM_THM_JUMP19:
10525 case R_ARM_MOVW_ABS_NC:
10526 case R_ARM_MOVT_ABS:
10527 case R_ARM_MOVW_PREL_NC:
10528 case R_ARM_MOVT_PREL:
10529 case R_ARM_THM_MOVW_ABS_NC:
10530 case R_ARM_THM_MOVT_ABS:
10531 case R_ARM_THM_MOVW_PREL_NC:
10532 case R_ARM_THM_MOVT_PREL:
10533 /* Should the interworking branches be here also? */
10534
10535 if (h != NULL)
10536 {
10537 struct elf32_arm_link_hash_entry *eh;
10538 struct elf32_arm_relocs_copied **pp;
10539 struct elf32_arm_relocs_copied *p;
10540
10541 eh = (struct elf32_arm_link_hash_entry *) h;
10542
10543 if (h->plt.refcount > 0)
10544 {
10545 h->plt.refcount -= 1;
10546 if (r_type == R_ARM_THM_CALL)
10547 eh->plt_maybe_thumb_refcount--;
10548
10549 if (r_type == R_ARM_THM_JUMP24
10550 || r_type == R_ARM_THM_JUMP19)
10551 eh->plt_thumb_refcount--;
10552 }
10553
10554 if (r_type == R_ARM_ABS32
10555 || r_type == R_ARM_REL32
10556 || r_type == R_ARM_ABS32_NOI
10557 || r_type == R_ARM_REL32_NOI)
10558 {
10559 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10560 pp = &p->next)
10561 if (p->section == sec)
10562 {
10563 p->count -= 1;
10564 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10565 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10566 p->pc_count -= 1;
10567 if (p->count == 0)
10568 *pp = p->next;
10569 break;
10570 }
10571 }
10572 }
10573 break;
10574
10575 default:
10576 break;
10577 }
10578 }
10579
10580 return TRUE;
10581 }
10582
10583 /* Look through the relocs for a section during the first phase. */
10584
10585 static bfd_boolean
10586 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10587 asection *sec, const Elf_Internal_Rela *relocs)
10588 {
10589 Elf_Internal_Shdr *symtab_hdr;
10590 struct elf_link_hash_entry **sym_hashes;
10591 const Elf_Internal_Rela *rel;
10592 const Elf_Internal_Rela *rel_end;
10593 bfd *dynobj;
10594 asection *sreloc;
10595 bfd_vma *local_got_offsets;
10596 struct elf32_arm_link_hash_table *htab;
10597 bfd_boolean needs_plt;
10598 unsigned long nsyms;
10599
10600 if (info->relocatable)
10601 return TRUE;
10602
10603 BFD_ASSERT (is_arm_elf (abfd));
10604
10605 htab = elf32_arm_hash_table (info);
10606 sreloc = NULL;
10607
10608 /* Create dynamic sections for relocatable executables so that we can
10609 copy relocations. */
10610 if (htab->root.is_relocatable_executable
10611 && ! htab->root.dynamic_sections_created)
10612 {
10613 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10614 return FALSE;
10615 }
10616
10617 dynobj = elf_hash_table (info)->dynobj;
10618 local_got_offsets = elf_local_got_offsets (abfd);
10619
10620 symtab_hdr = & elf_symtab_hdr (abfd);
10621 sym_hashes = elf_sym_hashes (abfd);
10622 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10623
10624 rel_end = relocs + sec->reloc_count;
10625 for (rel = relocs; rel < rel_end; rel++)
10626 {
10627 struct elf_link_hash_entry *h;
10628 struct elf32_arm_link_hash_entry *eh;
10629 unsigned long r_symndx;
10630 int r_type;
10631
10632 r_symndx = ELF32_R_SYM (rel->r_info);
10633 r_type = ELF32_R_TYPE (rel->r_info);
10634 r_type = arm_real_reloc_type (htab, r_type);
10635
10636 if (r_symndx >= nsyms
10637 /* PR 9934: It is possible to have relocations that do not
10638 refer to symbols, thus it is also possible to have an
10639 object file containing relocations but no symbol table. */
10640 && (r_symndx > 0 || nsyms > 0))
10641 {
10642 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10643 r_symndx);
10644 return FALSE;
10645 }
10646
10647 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10648 h = NULL;
10649 else
10650 {
10651 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10652 while (h->root.type == bfd_link_hash_indirect
10653 || h->root.type == bfd_link_hash_warning)
10654 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10655 }
10656
10657 eh = (struct elf32_arm_link_hash_entry *) h;
10658
10659 switch (r_type)
10660 {
10661 case R_ARM_GOT32:
10662 case R_ARM_GOT_PREL:
10663 case R_ARM_TLS_GD32:
10664 case R_ARM_TLS_IE32:
10665 /* This symbol requires a global offset table entry. */
10666 {
10667 int tls_type, old_tls_type;
10668
10669 switch (r_type)
10670 {
10671 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10672 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10673 default: tls_type = GOT_NORMAL; break;
10674 }
10675
10676 if (h != NULL)
10677 {
10678 h->got.refcount++;
10679 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10680 }
10681 else
10682 {
10683 bfd_signed_vma *local_got_refcounts;
10684
10685 /* This is a global offset table entry for a local symbol. */
10686 local_got_refcounts = elf_local_got_refcounts (abfd);
10687 if (local_got_refcounts == NULL)
10688 {
10689 bfd_size_type size;
10690
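/* Allocate one GOT refcount per local symbol, followed by one byte per
   symbol recording its GOT TLS type; both arrays live in a single
   allocation. */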
10691 size = symtab_hdr->sh_info;
10692 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10693 local_got_refcounts = (bfd_signed_vma *)
10694 bfd_zalloc (abfd, size);
10695 if (local_got_refcounts == NULL)
10696 return FALSE;
10697 elf_local_got_refcounts (abfd) = local_got_refcounts;
10698 elf32_arm_local_got_tls_type (abfd)
10699 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10700 }
10701 local_got_refcounts[r_symndx] += 1;
10702 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10703 }
10704
10705 /* We will already have issued an error message if there is a
10706 TLS / non-TLS mismatch, based on the symbol type. We don't
10707 support any linker relaxations. So just combine any TLS
10708 types needed. */
10709 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10710 && tls_type != GOT_NORMAL)
10711 tls_type |= old_tls_type;
10712
10713 if (old_tls_type != tls_type)
10714 {
10715 if (h != NULL)
10716 elf32_arm_hash_entry (h)->tls_type = tls_type;
10717 else
10718 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10719 }
10720 }
10721 /* Fall through. */
10722
10723 case R_ARM_TLS_LDM32:
10724 if (r_type == R_ARM_TLS_LDM32)
10725 htab->tls_ldm_got.refcount++;
10726 /* Fall through. */
10727
10728 case R_ARM_GOTOFF32:
10729 case R_ARM_GOTPC:
10730 if (htab->sgot == NULL)
10731 {
10732 if (htab->root.dynobj == NULL)
10733 htab->root.dynobj = abfd;
10734 if (!create_got_section (htab->root.dynobj, info))
10735 return FALSE;
10736 }
10737 break;
10738
10739 case R_ARM_ABS12:
10740 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10741 ldr __GOTT_INDEX__ offsets. */
10742 if (!htab->vxworks_p)
10743 break;
10744 /* Fall through. */
10745
10746 case R_ARM_PC24:
10747 case R_ARM_PLT32:
10748 case R_ARM_CALL:
10749 case R_ARM_JUMP24:
10750 case R_ARM_PREL31:
10751 case R_ARM_THM_CALL:
10752 case R_ARM_THM_JUMP24:
10753 case R_ARM_THM_JUMP19:
10754 needs_plt = 1;
10755 goto normal_reloc;
10756
10757 case R_ARM_MOVW_ABS_NC:
10758 case R_ARM_MOVT_ABS:
10759 case R_ARM_THM_MOVW_ABS_NC:
10760 case R_ARM_THM_MOVT_ABS:
10761 if (info->shared)
10762 {
10763 (*_bfd_error_handler)
10764 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10765 abfd, elf32_arm_howto_table_1[r_type].name,
10766 (h) ? h->root.root.string : "a local symbol");
10767 bfd_set_error (bfd_error_bad_value);
10768 return FALSE;
10769 }
10770
10771 /* Fall through. */
10772 case R_ARM_ABS32:
10773 case R_ARM_ABS32_NOI:
10774 case R_ARM_REL32:
10775 case R_ARM_REL32_NOI:
10776 case R_ARM_MOVW_PREL_NC:
10777 case R_ARM_MOVT_PREL:
10778 case R_ARM_THM_MOVW_PREL_NC:
10779 case R_ARM_THM_MOVT_PREL:
10780 needs_plt = 0;
10781 normal_reloc:
10782
10783 /* Should the interworking branches be listed here? */
10784 if (h != NULL)
10785 {
10786 /* If this reloc is in a read-only section, we might
10787 need a copy reloc. We can't check reliably at this
10788 stage whether the section is read-only, as input
10789 sections have not yet been mapped to output sections.
10790 Tentatively set the flag for now, and correct in
10791 adjust_dynamic_symbol. */
10792 if (!info->shared)
10793 h->non_got_ref = 1;
10794
10795 /* We may need a .plt entry if the function this reloc
10796 refers to is in a different object. We can't tell for
10797 sure yet, because something later might force the
10798 symbol local. */
10799 if (needs_plt)
10800 h->needs_plt = 1;
10801
10802 /* If we create a PLT entry, this relocation will reference
10803 it, even if it's an ABS32 relocation. */
10804 h->plt.refcount += 1;
10805
10806 /* It's too early to use htab->use_blx here, so we have to
10807 record possible blx references separately from
10808 relocs that definitely need a thumb stub. */
10809
10810 if (r_type == R_ARM_THM_CALL)
10811 eh->plt_maybe_thumb_refcount += 1;
10812
10813 if (r_type == R_ARM_THM_JUMP24
10814 || r_type == R_ARM_THM_JUMP19)
10815 eh->plt_thumb_refcount += 1;
10816 }
10817
10818 /* If we are creating a shared library or relocatable executable,
10819 and this is a reloc against a global symbol, or a non PC
10820 relative reloc against a local symbol, then we need to copy
10821 the reloc into the shared library. However, if we are linking
10822 with -Bsymbolic, we do not need to copy a reloc against a
10823 global symbol which is defined in an object we are
10824 including in the link (i.e., DEF_REGULAR is set). At
10825 this point we have not seen all the input files, so it is
10826 possible that DEF_REGULAR is not set now but will be set
10827 later (it is never cleared). We account for that
10828 possibility below by storing information in the
10829 relocs_copied field of the hash table entry. */
10830 if ((info->shared || htab->root.is_relocatable_executable)
10831 && (sec->flags & SEC_ALLOC) != 0
10832 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10833 || (h != NULL && ! h->needs_plt
10834 && (! info->symbolic || ! h->def_regular))))
10835 {
10836 struct elf32_arm_relocs_copied *p, **head;
10837
10838 /* When creating a shared object, we must copy these
10839 reloc types into the output file. We create a reloc
10840 section in dynobj and make room for this reloc. */
10841 if (sreloc == NULL)
10842 {
10843 sreloc = _bfd_elf_make_dynamic_reloc_section
10844 (sec, dynobj, 2, abfd, ! htab->use_rel);
10845
10846 if (sreloc == NULL)
10847 return FALSE;
10848
10849 /* BPABI objects never have dynamic relocations mapped. */
10850 if (htab->symbian_p)
10851 {
10852 flagword flags;
10853
10854 flags = bfd_get_section_flags (dynobj, sreloc);
10855 flags &= ~(SEC_LOAD | SEC_ALLOC);
10856 bfd_set_section_flags (dynobj, sreloc, flags);
10857 }
10858 }
10859
10860 /* If this is a global symbol, we count the number of
10861 relocations we need for this symbol. */
10862 if (h != NULL)
10863 {
10864 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10865 }
10866 else
10867 {
10868 /* Track dynamic relocs needed for local syms too.
10869 We really need local syms available to do this
10870 easily. Oh well. */
10871 asection *s;
10872 void *vpp;
10873 Elf_Internal_Sym *isym;
10874
10875 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10876 abfd, r_symndx);
10877 if (isym == NULL)
10878 return FALSE;
10879
10880 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10881 if (s == NULL)
10882 s = sec;
10883
10884 vpp = &elf_section_data (s)->local_dynrel;
10885 head = (struct elf32_arm_relocs_copied **) vpp;
10886 }
10887
10888 p = *head;
10889 if (p == NULL || p->section != sec)
10890 {
10891 bfd_size_type amt = sizeof *p;
10892
10893 p = (struct elf32_arm_relocs_copied *)
10894 bfd_alloc (htab->root.dynobj, amt);
10895 if (p == NULL)
10896 return FALSE;
10897 p->next = *head;
10898 *head = p;
10899 p->section = sec;
10900 p->count = 0;
10901 p->pc_count = 0;
10902 }
10903
10904 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10905 p->pc_count += 1;
10906 p->count += 1;
10907 }
10908 break;
10909
10910 /* This relocation describes the C++ object vtable hierarchy.
10911 Reconstruct it for later use during GC. */
10912 case R_ARM_GNU_VTINHERIT:
10913 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10914 return FALSE;
10915 break;
10916
10917 /* This relocation describes which C++ vtable entries are actually
10918 used. Record for later use during GC. */
10919 case R_ARM_GNU_VTENTRY:
10920 BFD_ASSERT (h != NULL);
10921 if (h != NULL
10922 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10923 return FALSE;
10924 break;
10925 }
10926 }
10927
10928 return TRUE;
10929 }
10930
10931 /* Unwinding tables are not referenced directly. This pass marks them as
10932 required if the corresponding code section is marked. */
10933
10934 static bfd_boolean
10935 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10936 elf_gc_mark_hook_fn gc_mark_hook)
10937 {
10938 bfd *sub;
10939 Elf_Internal_Shdr **elf_shdrp;
10940 bfd_boolean again;
10941
10942 /* Marking EH data may cause additional code sections to be marked,
10943 requiring multiple passes. */
10944 again = TRUE;
10945 while (again)
10946 {
10947 again = FALSE;
10948 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10949 {
10950 asection *o;
10951
10952 if (! is_arm_elf (sub))
10953 continue;
10954
10955 elf_shdrp = elf_elfsections (sub);
10956 for (o = sub->sections; o != NULL; o = o->next)
10957 {
10958 Elf_Internal_Shdr *hdr;
10959
10960 hdr = &elf_section_data (o)->this_hdr;
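/* Mark a so-far-unmarked .ARM.exidx section whenever the text section
   it describes (identified by its sh_link) has been marked. */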
10961 if (hdr->sh_type == SHT_ARM_EXIDX
10962 && hdr->sh_link
10963 && hdr->sh_link < elf_numsections (sub)
10964 && !o->gc_mark
10965 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10966 {
10967 again = TRUE;
10968 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10969 return FALSE;
10970 }
10971 }
10972 }
10973 }
10974
10975 return TRUE;
10976 }
10977
10978 /* Treat mapping symbols as special target symbols. */
10979
10980 static bfd_boolean
10981 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10982 {
10983 return bfd_is_arm_special_symbol_name (sym->name,
10984 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10985 }
10986
10987 /* This is a copy of elf_find_function() from elf.c except that
10988 ARM mapping symbols are ignored when looking for function names
10989 and STT_ARM_TFUNC is considered to be a function type. */
10990
10991 static bfd_boolean
10992 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10993 asection * section,
10994 asymbol ** symbols,
10995 bfd_vma offset,
10996 const char ** filename_ptr,
10997 const char ** functionname_ptr)
10998 {
10999 const char * filename = NULL;
11000 asymbol * func = NULL;
11001 bfd_vma low_func = 0;
11002 asymbol ** p;
11003
11004 for (p = symbols; *p != NULL; p++)
11005 {
11006 elf_symbol_type *q;
11007
11008 q = (elf_symbol_type *) *p;
11009
11010 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11011 {
11012 default:
11013 break;
11014 case STT_FILE:
11015 filename = bfd_asymbol_name (&q->symbol);
11016 break;
11017 case STT_FUNC:
11018 case STT_ARM_TFUNC:
11019 case STT_NOTYPE:
11020 /* Skip mapping symbols. */
11021 if ((q->symbol.flags & BSF_LOCAL)
11022 && bfd_is_arm_special_symbol_name (q->symbol.name,
11023 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11024 continue;
11025 /* Fall through. */
11026 if (bfd_get_section (&q->symbol) == section
11027 && q->symbol.value >= low_func
11028 && q->symbol.value <= offset)
11029 {
11030 func = (asymbol *) q;
11031 low_func = q->symbol.value;
11032 }
11033 break;
11034 }
11035 }
11036
11037 if (func == NULL)
11038 return FALSE;
11039
11040 if (filename_ptr)
11041 *filename_ptr = filename;
11042 if (functionname_ptr)
11043 *functionname_ptr = bfd_asymbol_name (func);
11044
11045 return TRUE;
11046 }
11047
11048
11049 /* Find the nearest line to a particular section and offset, for error
11050 reporting. This code is a duplicate of the code in elf.c, except
11051 that it uses arm_elf_find_function. */
11052
11053 static bfd_boolean
11054 elf32_arm_find_nearest_line (bfd * abfd,
11055 asection * section,
11056 asymbol ** symbols,
11057 bfd_vma offset,
11058 const char ** filename_ptr,
11059 const char ** functionname_ptr,
11060 unsigned int * line_ptr)
11061 {
11062 bfd_boolean found = FALSE;
11063
11064 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11065
11066 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11067 filename_ptr, functionname_ptr,
11068 line_ptr, 0,
11069 & elf_tdata (abfd)->dwarf2_find_line_info))
11070 {
11071 if (!*functionname_ptr)
11072 arm_elf_find_function (abfd, section, symbols, offset,
11073 *filename_ptr ? NULL : filename_ptr,
11074 functionname_ptr);
11075
11076 return TRUE;
11077 }
11078
11079 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11080 & found, filename_ptr,
11081 functionname_ptr, line_ptr,
11082 & elf_tdata (abfd)->line_info))
11083 return FALSE;
11084
11085 if (found && (*functionname_ptr || *line_ptr))
11086 return TRUE;
11087
11088 if (symbols == NULL)
11089 return FALSE;
11090
11091 if (! arm_elf_find_function (abfd, section, symbols, offset,
11092 filename_ptr, functionname_ptr))
11093 return FALSE;
11094
11095 *line_ptr = 0;
11096 return TRUE;
11097 }
11098
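/* Report inlined-function (caller) information for the position found by
   the last lookup, using the DWARF2 state shared with
   elf32_arm_find_nearest_line. */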
11099 static bfd_boolean
11100 elf32_arm_find_inliner_info (bfd * abfd,
11101 const char ** filename_ptr,
11102 const char ** functionname_ptr,
11103 unsigned int * line_ptr)
11104 {
11105 bfd_boolean found;
11106 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11107 functionname_ptr, line_ptr,
11108 & elf_tdata (abfd)->dwarf2_find_line_info);
11109 return found;
11110 }
11111
11112 /* Adjust a symbol defined by a dynamic object and referenced by a
11113 regular object. The current definition is in some section of the
11114 dynamic object, but we're not including those sections. We have to
11115 change the definition to something the rest of the link can
11116 understand. */
11117
11118 static bfd_boolean
11119 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11120 struct elf_link_hash_entry * h)
11121 {
11122 bfd * dynobj;
11123 asection * s;
11124 struct elf32_arm_link_hash_entry * eh;
11125 struct elf32_arm_link_hash_table *globals;
11126
11127 globals = elf32_arm_hash_table (info);
11128 dynobj = elf_hash_table (info)->dynobj;
11129
11130 /* Make sure we know what is going on here. */
11131 BFD_ASSERT (dynobj != NULL
11132 && (h->needs_plt
11133 || h->u.weakdef != NULL
11134 || (h->def_dynamic
11135 && h->ref_regular
11136 && !h->def_regular)));
11137
11138 eh = (struct elf32_arm_link_hash_entry *) h;
11139
11140 /* If this is a function, put it in the procedure linkage table. We
11141 will fill in the contents of the procedure linkage table later,
11142 when we know the address of the .got section. */
11143 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11144 || h->needs_plt)
11145 {
11146 if (h->plt.refcount <= 0
11147 || SYMBOL_CALLS_LOCAL (info, h)
11148 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11149 && h->root.type == bfd_link_hash_undefweak))
11150 {
11151 /* This case can occur if we saw a PLT32 reloc in an input
11152 file, but the symbol was never referred to by a dynamic
11153 object, or if all references were garbage collected. In
11154 such a case, we don't actually need to build a procedure
11155 linkage table, and we can just do a PC24 reloc instead. */
11156 h->plt.offset = (bfd_vma) -1;
11157 eh->plt_thumb_refcount = 0;
11158 eh->plt_maybe_thumb_refcount = 0;
11159 h->needs_plt = 0;
11160 }
11161
11162 return TRUE;
11163 }
11164 else
11165 {
11166 /* It's possible that we incorrectly decided a .plt reloc was
11167 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11168 in check_relocs. We can't decide accurately between function
11169 and non-function syms in check_relocs; objects loaded later in
11170 the link may change h->type. So fix it now. */
11171 h->plt.offset = (bfd_vma) -1;
11172 eh->plt_thumb_refcount = 0;
11173 eh->plt_maybe_thumb_refcount = 0;
11174 }
11175
11176 /* If this is a weak symbol, and there is a real definition, the
11177 processor independent code will have arranged for us to see the
11178 real definition first, and we can just use the same value. */
11179 if (h->u.weakdef != NULL)
11180 {
11181 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11182 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11183 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11184 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11185 return TRUE;
11186 }
11187
11188 /* If there are no non-GOT references, we do not need a copy
11189 relocation. */
11190 if (!h->non_got_ref)
11191 return TRUE;
11192
11193 /* This is a reference to a symbol defined by a dynamic object which
11194 is not a function. */
11195
11196 /* If we are creating a shared library, we must presume that the
11197 only references to the symbol are via the global offset table.
11198 For such cases we need not do anything here; the relocations will
11199 be handled correctly by relocate_section. Relocatable executables
11200 can reference data in shared objects directly, so we don't need to
11201 do anything here. */
11202 if (info->shared || globals->root.is_relocatable_executable)
11203 return TRUE;
11204
11205 if (h->size == 0)
11206 {
11207 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11208 h->root.root.string);
11209 return TRUE;
11210 }
11211
11212 /* We must allocate the symbol in our .dynbss section, which will
11213 become part of the .bss section of the executable. There will be
11214 an entry for this symbol in the .dynsym section. The dynamic
11215 object will contain position independent code, so all references
11216 from the dynamic object to this symbol will go through the global
11217 offset table. The dynamic linker will use the .dynsym entry to
11218 determine the address it must put in the global offset table, so
11219 both the dynamic object and the regular object will refer to the
11220 same memory location for the variable. */
11221 s = bfd_get_section_by_name (dynobj, ".dynbss");
11222 BFD_ASSERT (s != NULL);
11223
11224 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11225 copy the initial value out of the dynamic object and into the
11226 runtime process image. We need to remember the offset into the
11227 .rel(a).bss section we are going to use. */
11228 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11229 {
11230 asection *srel;
11231
11232 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11233 BFD_ASSERT (srel != NULL);
11234 srel->size += RELOC_SIZE (globals);
11235 h->needs_copy = 1;
11236 }
11237
11238 return _bfd_elf_adjust_dynamic_copy (h, s);
11239 }
11240
11241 /* Allocate space in .plt, .got and associated reloc sections for
11242 dynamic relocs. */
11243
11244 static bfd_boolean
11245 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11246 {
11247 struct bfd_link_info *info;
11248 struct elf32_arm_link_hash_table *htab;
11249 struct elf32_arm_link_hash_entry *eh;
11250 struct elf32_arm_relocs_copied *p;
11251 bfd_signed_vma thumb_refs;
11252
11253 eh = (struct elf32_arm_link_hash_entry *) h;
11254
11255 if (h->root.type == bfd_link_hash_indirect)
11256 return TRUE;
11257
11258 if (h->root.type == bfd_link_hash_warning)
11259 /* When warning symbols are created, they **replace** the "real"
11260 entry in the hash table, thus we never get to see the real
11261 symbol in a hash traversal. So look at it now. */
11262 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11263
11264 info = (struct bfd_link_info *) inf;
11265 htab = elf32_arm_hash_table (info);
11266
11267 if (htab->root.dynamic_sections_created
11268 && h->plt.refcount > 0)
11269 {
11270 /* Make sure this symbol is output as a dynamic symbol.
11271 Undefined weak syms won't yet be marked as dynamic. */
11272 if (h->dynindx == -1
11273 && !h->forced_local)
11274 {
11275 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11276 return FALSE;
11277 }
11278
11279 if (info->shared
11280 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11281 {
11282 asection *s = htab->splt;
11283
11284 /* If this is the first .plt entry, make room for the special
11285 first entry. */
11286 if (s->size == 0)
11287 s->size += htab->plt_header_size;
11288
11289 h->plt.offset = s->size;
11290
11291 /* If we will insert a Thumb trampoline before this PLT, leave room
11292 for it. */
11293 thumb_refs = eh->plt_thumb_refcount;
11294 if (!htab->use_blx)
11295 thumb_refs += eh->plt_maybe_thumb_refcount;
11296
11297 if (thumb_refs > 0)
11298 {
11299 h->plt.offset += PLT_THUMB_STUB_SIZE;
11300 s->size += PLT_THUMB_STUB_SIZE;
11301 }
11302
11303 /* If this symbol is not defined in a regular file, and we are
11304 not generating a shared library, then set the symbol to this
11305 location in the .plt. This is required to make function
11306 pointers compare as equal between the normal executable and
11307 the shared library. */
11308 if (! info->shared
11309 && !h->def_regular)
11310 {
11311 h->root.u.def.section = s;
11312 h->root.u.def.value = h->plt.offset;
11313
11314 /* Make sure the function is not marked as Thumb, in case
11315 it is the target of an ABS32 relocation, which will
11316 point to the PLT entry. */
11317 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11318 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11319 }
11320
11321 /* Make room for this entry. */
11322 s->size += htab->plt_entry_size;
11323
11324 if (!htab->symbian_p)
11325 {
11326 /* We also need to make an entry in the .got.plt section, which
11327 will be placed in the .got section by the linker script. */
11328 eh->plt_got_offset = htab->sgotplt->size;
11329 htab->sgotplt->size += 4;
11330 }
11331
11332 /* We also need to make an entry in the .rel(a).plt section. */
11333 htab->srelplt->size += RELOC_SIZE (htab);
11334
11335 /* VxWorks executables have a second set of relocations for
11336 each PLT entry. They go in a separate relocation section,
11337 which is processed by the kernel loader. */
11338 if (htab->vxworks_p && !info->shared)
11339 {
11340 /* There is a relocation for the initial PLT entry:
11341 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11342 if (h->plt.offset == htab->plt_header_size)
11343 htab->srelplt2->size += RELOC_SIZE (htab);
11344
11345 /* There are two extra relocations for each subsequent
11346 PLT entry: an R_ARM_32 relocation for the GOT entry,
11347 and an R_ARM_32 relocation for the PLT entry. */
11348 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11349 }
11350 }
11351 else
11352 {
11353 h->plt.offset = (bfd_vma) -1;
11354 h->needs_plt = 0;
11355 }
11356 }
11357 else
11358 {
11359 h->plt.offset = (bfd_vma) -1;
11360 h->needs_plt = 0;
11361 }
11362
11363 if (h->got.refcount > 0)
11364 {
11365 asection *s;
11366 bfd_boolean dyn;
11367 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11368 int indx;
11369
11370 /* Make sure this symbol is output as a dynamic symbol.
11371 Undefined weak syms won't yet be marked as dynamic. */
11372 if (h->dynindx == -1
11373 && !h->forced_local)
11374 {
11375 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11376 return FALSE;
11377 }
11378
11379 if (!htab->symbian_p)
11380 {
11381 s = htab->sgot;
11382 h->got.offset = s->size;
11383
11384 if (tls_type == GOT_UNKNOWN)
11385 abort ();
11386
11387 if (tls_type == GOT_NORMAL)
11388 /* Non-TLS symbols need one GOT slot. */
11389 s->size += 4;
11390 else
11391 {
11392 if (tls_type & GOT_TLS_GD)
11393 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11394 s->size += 8;
11395 if (tls_type & GOT_TLS_IE)
11396 /* R_ARM_TLS_IE32 needs one GOT slot. */
11397 s->size += 4;
11398 }
11399
11400 dyn = htab->root.dynamic_sections_created;
11401
11402 indx = 0;
11403 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11404 && (!info->shared
11405 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11406 indx = h->dynindx;
11407
11408 if (tls_type != GOT_NORMAL
11409 && (info->shared || indx != 0)
11410 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11411 || h->root.type != bfd_link_hash_undefweak))
11412 {
11413 if (tls_type & GOT_TLS_IE)
11414 htab->srelgot->size += RELOC_SIZE (htab);
11415
11416 if (tls_type & GOT_TLS_GD)
11417 htab->srelgot->size += RELOC_SIZE (htab);
11418
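 /* When the symbol has a dynamic index, the second word of the GD
 pair (the dtpoff value) needs its own dynamic reloc as well. */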
11419 if ((tls_type & GOT_TLS_GD) && indx != 0)
11420 htab->srelgot->size += RELOC_SIZE (htab);
11421 }
11422 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11423 || h->root.type != bfd_link_hash_undefweak)
11424 && (info->shared
11425 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11426 htab->srelgot->size += RELOC_SIZE (htab);
11427 }
11428 }
11429 else
11430 h->got.offset = (bfd_vma) -1;
11431
11432 /* Allocate stubs for exported Thumb functions on v4t. */
11433 if (!htab->use_blx && h->dynindx != -1
11434 && h->def_regular
11435 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11436 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11437 {
11438 struct elf_link_hash_entry * th;
11439 struct bfd_link_hash_entry * bh;
11440 struct elf_link_hash_entry * myh;
11441 char name[1024];
11442 asection *s;
11443 bh = NULL;
11444 /* Create a new symbol to register the real location of the function. */
11445 s = h->root.u.def.section;
11446 sprintf (name, "__real_%s", h->root.root.string);
11447 _bfd_generic_link_add_one_symbol (info, s->owner,
11448 name, BSF_GLOBAL, s,
11449 h->root.u.def.value,
11450 NULL, TRUE, FALSE, &bh);
11451
11452 myh = (struct elf_link_hash_entry *) bh;
11453 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11454 myh->forced_local = 1;
11455 eh->export_glue = myh;
11456 th = record_arm_to_thumb_glue (info, h);
11457 /* Point the symbol at the stub. */
11458 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11459 h->root.u.def.section = th->root.u.def.section;
11460 h->root.u.def.value = th->root.u.def.value & ~1;
11461 }
11462
11463 if (eh->relocs_copied == NULL)
11464 return TRUE;
11465
11466 /* In the shared -Bsymbolic case, discard space allocated for
11467 dynamic pc-relative relocs against symbols which turn out to be
11468 defined in regular objects. For the normal shared case, discard
11469 space for pc-relative relocs that have become local due to symbol
11470 visibility changes. */
11471
11472 if (info->shared || htab->root.is_relocatable_executable)
11473 {
11474 /* The only relocs that use pc_count are R_ARM_REL32 and
11475 R_ARM_REL32_NOI, which will appear on something like
11476 ".long foo - .". We want calls to protected symbols to resolve
11477 directly to the function rather than going via the plt. If people
11478 want function pointer comparisons to work as expected then they
11479 should avoid writing assembly like ".long foo - .". */
11480 if (SYMBOL_CALLS_LOCAL (info, h))
11481 {
11482 struct elf32_arm_relocs_copied **pp;
11483
11484 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11485 {
11486 p->count -= p->pc_count;
11487 p->pc_count = 0;
11488 if (p->count == 0)
11489 *pp = p->next;
11490 else
11491 pp = &p->next;
11492 }
11493 }
11494
11495 if (elf32_arm_hash_table (info)->vxworks_p)
11496 {
11497 struct elf32_arm_relocs_copied **pp;
11498
11499 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11500 {
11501 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11502 *pp = p->next;
11503 else
11504 pp = &p->next;
11505 }
11506 }
11507
11508 /* Also discard relocs on undefined weak syms with non-default
11509 visibility. */
11510 if (eh->relocs_copied != NULL
11511 && h->root.type == bfd_link_hash_undefweak)
11512 {
11513 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11514 eh->relocs_copied = NULL;
11515
11516 /* Make sure undefined weak symbols are output as a dynamic
11517 symbol in PIEs. */
11518 else if (h->dynindx == -1
11519 && !h->forced_local)
11520 {
11521 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11522 return FALSE;
11523 }
11524 }
11525
11526 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11527 && h->root.type == bfd_link_hash_new)
11528 {
11529 /* Output absolute symbols so that we can create relocations
11530 against them. For normal symbols we output a relocation
11531 against the section that contains them. */
11532 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11533 return FALSE;
11534 }
11535
11536 }
11537 else
11538 {
11539 /* For the non-shared case, discard space for relocs against
11540 symbols which turn out to need copy relocs or are not
11541 dynamic. */
11542
11543 if (!h->non_got_ref
11544 && ((h->def_dynamic
11545 && !h->def_regular)
11546 || (htab->root.dynamic_sections_created
11547 && (h->root.type == bfd_link_hash_undefweak
11548 || h->root.type == bfd_link_hash_undefined))))
11549 {
11550 /* Make sure this symbol is output as a dynamic symbol.
11551 Undefined weak syms won't yet be marked as dynamic. */
11552 if (h->dynindx == -1
11553 && !h->forced_local)
11554 {
11555 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11556 return FALSE;
11557 }
11558
11559 /* If that succeeded, we know we'll be keeping all the
11560 relocs. */
11561 if (h->dynindx != -1)
11562 goto keep;
11563 }
11564
11565 eh->relocs_copied = NULL;
11566
11567 keep: ;
11568 }
11569
11570 /* Finally, allocate space. */
11571 for (p = eh->relocs_copied; p != NULL; p = p->next)
11572 {
11573 asection *sreloc = elf_section_data (p->section)->sreloc;
11574 sreloc->size += p->count * RELOC_SIZE (htab);
11575 }
11576
11577 return TRUE;
11578 }
11579
11580 /* Find any dynamic relocs that apply to read-only sections. */
11581
11582 static bfd_boolean
11583 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11584 {
11585 struct elf32_arm_link_hash_entry * eh;
11586 struct elf32_arm_relocs_copied * p;
11587
11588 if (h->root.type == bfd_link_hash_warning)
11589 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11590
11591 eh = (struct elf32_arm_link_hash_entry *) h;
11592 for (p = eh->relocs_copied; p != NULL; p = p->next)
11593 {
11594 asection *s = p->section;
11595
11596 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11597 {
11598 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11599
11600 info->flags |= DF_TEXTREL;
11601
11602 /* Not an error, just cut short the traversal. */
11603 return FALSE;
11604 }
11605 }
11606 return TRUE;
11607 }
11608
11609 void
11610 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11611 int byteswap_code)
11612 {
11613 struct elf32_arm_link_hash_table *globals;
11614
11615 globals = elf32_arm_hash_table (info);
11616 globals->byteswap_code = byteswap_code;
11617 }
11618
11619 /* Set the sizes of the dynamic sections. */
11620
11621 static bfd_boolean
11622 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11623 struct bfd_link_info * info)
11624 {
11625 bfd * dynobj;
11626 asection * s;
11627 bfd_boolean plt;
11628 bfd_boolean relocs;
11629 bfd *ibfd;
11630 struct elf32_arm_link_hash_table *htab;
11631
11632 htab = elf32_arm_hash_table (info);
11633 dynobj = elf_hash_table (info)->dynobj;
11634 BFD_ASSERT (dynobj != NULL);
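 /* Decide whether BLX can be used for ARM/Thumb interworking, based on
 the merged architecture attributes. */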
11635 check_use_blx (htab);
11636
11637 if (elf_hash_table (info)->dynamic_sections_created)
11638 {
11639 /* Set the contents of the .interp section to the interpreter. */
11640 if (info->executable)
11641 {
11642 s = bfd_get_section_by_name (dynobj, ".interp");
11643 BFD_ASSERT (s != NULL);
11644 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11645 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11646 }
11647 }
11648
11649 /* Set up .got offsets for local syms, and space for local dynamic
11650 relocs. */
11651 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11652 {
11653 bfd_signed_vma *local_got;
11654 bfd_signed_vma *end_local_got;
11655 char *local_tls_type;
11656 bfd_size_type locsymcount;
11657 Elf_Internal_Shdr *symtab_hdr;
11658 asection *srel;
11659 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11660
11661 if (! is_arm_elf (ibfd))
11662 continue;
11663
11664 for (s = ibfd->sections; s != NULL; s = s->next)
11665 {
11666 struct elf32_arm_relocs_copied *p;
11667
11668 for (p = (struct elf32_arm_relocs_copied *)
11669 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11670 {
11671 if (!bfd_is_abs_section (p->section)
11672 && bfd_is_abs_section (p->section->output_section))
11673 {
11674 /* Input section has been discarded, either because
11675 it is a copy of a linkonce section or due to
11676 linker script /DISCARD/, so we'll be discarding
11677 the relocs too. */
11678 }
11679 else if (is_vxworks
11680 && strcmp (p->section->output_section->name,
11681 ".tls_vars") == 0)
11682 {
11683 /* Relocations in vxworks .tls_vars sections are
11684 handled specially by the loader. */
11685 }
11686 else if (p->count != 0)
11687 {
11688 srel = elf_section_data (p->section)->sreloc;
11689 srel->size += p->count * RELOC_SIZE (htab);
11690 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11691 info->flags |= DF_TEXTREL;
11692 }
11693 }
11694 }
11695
11696 local_got = elf_local_got_refcounts (ibfd);
11697 if (!local_got)
11698 continue;
11699
11700 symtab_hdr = & elf_symtab_hdr (ibfd);
11701 locsymcount = symtab_hdr->sh_info;
11702 end_local_got = local_got + locsymcount;
11703 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11704 s = htab->sgot;
11705 srel = htab->srelgot;
11706 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11707 {
11708 if (*local_got > 0)
11709 {
11710 *local_got = s->size;
11711 if (*local_tls_type & GOT_TLS_GD)
11712 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11713 s->size += 8;
11714 if (*local_tls_type & GOT_TLS_IE)
11715 s->size += 4;
11716 if (*local_tls_type == GOT_NORMAL)
11717 s->size += 4;
11718
11719 if (info->shared || *local_tls_type == GOT_TLS_GD)
11720 srel->size += RELOC_SIZE (htab);
11721 }
11722 else
11723 *local_got = (bfd_vma) -1;
11724 }
11725 }
11726
11727 if (htab->tls_ldm_got.refcount > 0)
11728 {
11729 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11730 for R_ARM_TLS_LDM32 relocations. */
11731 htab->tls_ldm_got.offset = htab->sgot->size;
11732 htab->sgot->size += 8;
11733 if (info->shared)
11734 htab->srelgot->size += RELOC_SIZE (htab);
11735 }
11736 else
11737 htab->tls_ldm_got.offset = -1;
11738
11739 /* Allocate global sym .plt and .got entries, and space for global
11740 sym dynamic relocs. */
11741 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11742
11743 /* Here we rummage through the found bfds to collect glue information. */
11744 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11745 {
11746 if (! is_arm_elf (ibfd))
11747 continue;
11748
11749 /* Initialise mapping tables for code/data. */
11750 bfd_elf32_arm_init_maps (ibfd);
11751
11752 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11753 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11754 /* xgettext:c-format */
11755 _bfd_error_handler (_("Errors encountered processing file %s"),
11756 ibfd->filename);
11757 }
11758
11759 /* Allocate space for the glue sections now that we've sized them. */
11760 bfd_elf32_arm_allocate_interworking_sections (info);
11761
11762 /* The check_relocs and adjust_dynamic_symbol entry points have
11763 determined the sizes of the various dynamic sections. Allocate
11764 memory for them. */
11765 plt = FALSE;
11766 relocs = FALSE;
11767 for (s = dynobj->sections; s != NULL; s = s->next)
11768 {
11769 const char * name;
11770
11771 if ((s->flags & SEC_LINKER_CREATED) == 0)
11772 continue;
11773
11774 /* It's OK to base decisions on the section name, because none
11775 of the dynobj section names depend upon the input files. */
11776 name = bfd_get_section_name (dynobj, s);
11777
11778 if (strcmp (name, ".plt") == 0)
11779 {
11780 /* Remember whether there is a PLT. */
11781 plt = s->size != 0;
11782 }
11783 else if (CONST_STRNEQ (name, ".rel"))
11784 {
11785 if (s->size != 0)
11786 {
11787 /* Remember whether there are any reloc sections other
11788 than .rel(a).plt and .rela.plt.unloaded. */
11789 if (s != htab->srelplt && s != htab->srelplt2)
11790 relocs = TRUE;
11791
11792 /* We use the reloc_count field as a counter if we need
11793 to copy relocs into the output file. */
11794 s->reloc_count = 0;
11795 }
11796 }
11797 else if (! CONST_STRNEQ (name, ".got")
11798 && strcmp (name, ".dynbss") != 0)
11799 {
11800 /* It's not one of our sections, so don't allocate space. */
11801 continue;
11802 }
11803
11804 if (s->size == 0)
11805 {
11806 /* If we don't need this section, strip it from the
11807 output file. This is mostly to handle .rel(a).bss and
11808 .rel(a).plt. We must create both sections in
11809 create_dynamic_sections, because they must be created
11810 before the linker maps input sections to output
11811 sections. The linker does that before
11812 adjust_dynamic_symbol is called, and it is that
11813 function which decides whether anything needs to go
11814 into these sections. */
11815 s->flags |= SEC_EXCLUDE;
11816 continue;
11817 }
11818
11819 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11820 continue;
11821
11822 /* Allocate memory for the section contents. */
11823 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
11824 if (s->contents == NULL)
11825 return FALSE;
11826 }
11827
11828 if (elf_hash_table (info)->dynamic_sections_created)
11829 {
11830 /* Add some entries to the .dynamic section. We fill in the
11831 values later, in elf32_arm_finish_dynamic_sections, but we
11832 must add the entries now so that we get the correct size for
11833 the .dynamic section. The DT_DEBUG entry is filled in by the
11834 dynamic linker and used by the debugger. */
11835 #define add_dynamic_entry(TAG, VAL) \
11836 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11837
11838 if (info->executable)
11839 {
11840 if (!add_dynamic_entry (DT_DEBUG, 0))
11841 return FALSE;
11842 }
11843
11844 if (plt)
11845 {
11846 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11847 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11848 || !add_dynamic_entry (DT_PLTREL,
11849 htab->use_rel ? DT_REL : DT_RELA)
11850 || !add_dynamic_entry (DT_JMPREL, 0))
11851 return FALSE;
11852 }
11853
11854 if (relocs)
11855 {
11856 if (htab->use_rel)
11857 {
11858 if (!add_dynamic_entry (DT_REL, 0)
11859 || !add_dynamic_entry (DT_RELSZ, 0)
11860 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11861 return FALSE;
11862 }
11863 else
11864 {
11865 if (!add_dynamic_entry (DT_RELA, 0)
11866 || !add_dynamic_entry (DT_RELASZ, 0)
11867 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11868 return FALSE;
11869 }
11870 }
11871
11872 /* If any dynamic relocs apply to a read-only section,
11873 then we need a DT_TEXTREL entry. */
11874 if ((info->flags & DF_TEXTREL) == 0)
11875 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11876 info);
11877
11878 if ((info->flags & DF_TEXTREL) != 0)
11879 {
11880 if (!add_dynamic_entry (DT_TEXTREL, 0))
11881 return FALSE;
11882 }
11883 if (htab->vxworks_p
11884 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11885 return FALSE;
11886 }
11887 #undef add_dynamic_entry
11888
11889 return TRUE;
11890 }
11891
11892 /* Finish up dynamic symbol handling. We set the contents of various
11893 dynamic sections here. */
11894
11895 static bfd_boolean
11896 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11897 struct bfd_link_info * info,
11898 struct elf_link_hash_entry * h,
11899 Elf_Internal_Sym * sym)
11900 {
11901 bfd * dynobj;
11902 struct elf32_arm_link_hash_table *htab;
11903 struct elf32_arm_link_hash_entry *eh;
11904
11905 dynobj = elf_hash_table (info)->dynobj;
11906 htab = elf32_arm_hash_table (info);
11907 eh = (struct elf32_arm_link_hash_entry *) h;
11908
11909 if (h->plt.offset != (bfd_vma) -1)
11910 {
11911 asection * splt;
11912 asection * srel;
11913 bfd_byte *loc;
11914 bfd_vma plt_index;
11915 Elf_Internal_Rela rel;
11916
11917 /* This symbol has an entry in the procedure linkage table. Set
11918 it up. */
11919
11920 BFD_ASSERT (h->dynindx != -1);
11921
11922 splt = bfd_get_section_by_name (dynobj, ".plt");
11923 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11924 BFD_ASSERT (splt != NULL && srel != NULL);
11925
11926 /* Fill in the entry in the procedure linkage table. */
11927 if (htab->symbian_p)
11928 {
11929 put_arm_insn (htab, output_bfd,
11930 elf32_arm_symbian_plt_entry[0],
11931 splt->contents + h->plt.offset);
11932 bfd_put_32 (output_bfd,
11933 elf32_arm_symbian_plt_entry[1],
11934 splt->contents + h->plt.offset + 4);
11935
11936 /* Fill in the entry in the .rel.plt section. */
11937 rel.r_offset = (splt->output_section->vma
11938 + splt->output_offset
11939 + h->plt.offset + 4);
11940 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11941
11942 /* Get the index in the procedure linkage table which
11943 corresponds to this symbol. This is the index of this symbol
11944 in all the symbols for which we are making plt entries. The
11945 first entry in the procedure linkage table is reserved. */
11946 plt_index = ((h->plt.offset - htab->plt_header_size)
11947 / htab->plt_entry_size);
11948 }
11949 else
11950 {
11951 bfd_vma got_offset, got_address, plt_address;
11952 bfd_vma got_displacement;
11953 asection * sgot;
11954 bfd_byte * ptr;
11955
11956 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11957 BFD_ASSERT (sgot != NULL);
11958
11959 /* Get the offset into the .got.plt table of the entry that
11960 corresponds to this function. */
11961 got_offset = eh->plt_got_offset;
11962
11963 /* Get the index in the procedure linkage table which
11964 corresponds to this symbol. This is the index of this symbol
11965 in all the symbols for which we are making plt entries. The
11966 first three entries in .got.plt are reserved; after that
11967 symbols appear in the same order as in .plt. */
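 /* Hence the 12-byte bias below: three reserved 4-byte entries. */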
11968 plt_index = (got_offset - 12) / 4;
11969
11970 /* Calculate the address of the GOT entry. */
11971 got_address = (sgot->output_section->vma
11972 + sgot->output_offset
11973 + got_offset);
11974
11975 /* ...and the address of the PLT entry. */
11976 plt_address = (splt->output_section->vma
11977 + splt->output_offset
11978 + h->plt.offset);
11979
11980 ptr = htab->splt->contents + h->plt.offset;
11981 if (htab->vxworks_p && info->shared)
11982 {
11983 unsigned int i;
11984 bfd_vma val;
11985
11986 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11987 {
11988 val = elf32_arm_vxworks_shared_plt_entry[i];
11989 if (i == 2)
11990 val |= got_address - sgot->output_section->vma;
11991 if (i == 5)
11992 val |= plt_index * RELOC_SIZE (htab);
11993 if (i == 2 || i == 5)
11994 bfd_put_32 (output_bfd, val, ptr);
11995 else
11996 put_arm_insn (htab, output_bfd, val, ptr);
11997 }
11998 }
11999 else if (htab->vxworks_p)
12000 {
12001 unsigned int i;
12002 bfd_vma val;
12003
12004 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12005 {
12006 val = elf32_arm_vxworks_exec_plt_entry[i];
12007 if (i == 2)
12008 val |= got_address;
12009 if (i == 4)
12010 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12011 if (i == 5)
12012 val |= plt_index * RELOC_SIZE (htab);
12013 if (i == 2 || i == 5)
12014 bfd_put_32 (output_bfd, val, ptr);
12015 else
12016 put_arm_insn (htab, output_bfd, val, ptr);
12017 }
12018
12019 loc = (htab->srelplt2->contents
12020 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12021
12022 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12023 referencing the GOT for this PLT entry. */
12024 rel.r_offset = plt_address + 8;
12025 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12026 rel.r_addend = got_offset;
12027 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12028 loc += RELOC_SIZE (htab);
12029
12030 /* Create the R_ARM_ABS32 relocation referencing the
12031 beginning of the PLT for this GOT entry. */
12032 rel.r_offset = got_address;
12033 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12034 rel.r_addend = 0;
12035 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12036 }
12037 else
12038 {
12039 bfd_signed_vma thumb_refs;
12040 /* Calculate the displacement between the PLT slot and the
12041 entry in the GOT. The eight-byte offset accounts for the
12042 value produced by adding to pc in the first instruction
12043 of the PLT stub. */
12044 got_displacement = got_address - (plt_address + 8);
12045
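 /* The PLT instructions below encode the displacement in 8 + 8 + 12
 immediate bits, so it must fit in 28 bits. */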
12046 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
12047
12048 thumb_refs = eh->plt_thumb_refcount;
12049 if (!htab->use_blx)
12050 thumb_refs += eh->plt_maybe_thumb_refcount;
12051
12052 if (thumb_refs > 0)
12053 {
12054 put_thumb_insn (htab, output_bfd,
12055 elf32_arm_plt_thumb_stub[0], ptr - 4);
12056 put_thumb_insn (htab, output_bfd,
12057 elf32_arm_plt_thumb_stub[1], ptr - 2);
12058 }
12059
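 /* Split the displacement across the three instructions: bits [27:20],
 [19:12] and [11:0] respectively. */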
12060 put_arm_insn (htab, output_bfd,
12061 elf32_arm_plt_entry[0]
12062 | ((got_displacement & 0x0ff00000) >> 20),
12063 ptr + 0);
12064 put_arm_insn (htab, output_bfd,
12065 elf32_arm_plt_entry[1]
12066 | ((got_displacement & 0x000ff000) >> 12),
12067 ptr + 4);
12068 put_arm_insn (htab, output_bfd,
12069 elf32_arm_plt_entry[2]
12070 | (got_displacement & 0x00000fff),
12071 ptr + 8);
12072 #ifdef FOUR_WORD_PLT
12073 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12074 #endif
12075 }
12076
12077 /* Fill in the entry in the global offset table. */
12078 bfd_put_32 (output_bfd,
12079 (splt->output_section->vma
12080 + splt->output_offset),
12081 sgot->contents + got_offset);
12082
12083 /* Fill in the entry in the .rel(a).plt section. */
12084 rel.r_addend = 0;
12085 rel.r_offset = got_address;
12086 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12087 }
12088
12089 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12090 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12091
12092 if (!h->def_regular)
12093 {
12094 /* Mark the symbol as undefined, rather than as defined in
12095 the .plt section. Leave the value alone. */
12096 sym->st_shndx = SHN_UNDEF;
12097 /* If the symbol is weak, we do need to clear the value.
12098 Otherwise, the PLT entry would provide a definition for
12099 the symbol even if the symbol wasn't defined anywhere,
12100 and so the symbol would never be NULL. */
12101 if (!h->ref_regular_nonweak)
12102 sym->st_value = 0;
12103 }
12104 }
12105
12106 if (h->got.offset != (bfd_vma) -1
12107 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12108 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12109 {
12110 asection * sgot;
12111 asection * srel;
12112 Elf_Internal_Rela rel;
12113 bfd_byte *loc;
12114 bfd_vma offset;
12115
12116 /* This symbol has an entry in the global offset table. Set it
12117 up. */
12118 sgot = bfd_get_section_by_name (dynobj, ".got");
12119 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12120 BFD_ASSERT (sgot != NULL && srel != NULL);
12121
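 /* The low bit of h->got.offset records whether relocate_section has
 already initialised this GOT entry; mask it off to get the real
 offset. */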
12122 offset = (h->got.offset & ~(bfd_vma) 1);
12123 rel.r_addend = 0;
12124 rel.r_offset = (sgot->output_section->vma
12125 + sgot->output_offset
12126 + offset);
12127
12128 /* If this is a static link, or it is a -Bsymbolic link and the
12129 symbol is defined locally or was forced to be local because
12130 of a version file, we just want to emit a RELATIVE reloc.
12131 The entry in the global offset table will already have been
12132 initialized in the relocate_section function. */
12133 if (info->shared
12134 && SYMBOL_REFERENCES_LOCAL (info, h))
12135 {
12136 BFD_ASSERT ((h->got.offset & 1) != 0);
12137 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12138 if (!htab->use_rel)
12139 {
12140 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12141 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12142 }
12143 }
12144 else
12145 {
12146 BFD_ASSERT ((h->got.offset & 1) == 0);
12147 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12148 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12149 }
12150
12151 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12152 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12153 }
12154
12155 if (h->needs_copy)
12156 {
12157 asection * s;
12158 Elf_Internal_Rela rel;
12159 bfd_byte *loc;
12160
12161 /* This symbol needs a copy reloc. Set it up. */
12162 BFD_ASSERT (h->dynindx != -1
12163 && (h->root.type == bfd_link_hash_defined
12164 || h->root.type == bfd_link_hash_defweak));
12165
12166 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12167 RELOC_SECTION (htab, ".bss"));
12168 BFD_ASSERT (s != NULL);
12169
12170 rel.r_addend = 0;
12171 rel.r_offset = (h->root.u.def.value
12172 + h->root.u.def.section->output_section->vma
12173 + h->root.u.def.section->output_offset);
12174 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12175 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12176 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12177 }
12178
12179 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12180 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12181 to the ".got" section. */
12182 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12183 || (!htab->vxworks_p && h == htab->root.hgot))
12184 sym->st_shndx = SHN_ABS;
12185
12186 return TRUE;
12187 }
12188
12189 /* Finish up the dynamic sections. */
12190
12191 static bfd_boolean
12192 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12193 {
12194 bfd * dynobj;
12195 asection * sgot;
12196 asection * sdyn;
12197
12198 dynobj = elf_hash_table (info)->dynobj;
12199
12200 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12201 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12202 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12203
12204 if (elf_hash_table (info)->dynamic_sections_created)
12205 {
12206 asection *splt;
12207 Elf32_External_Dyn *dyncon, *dynconend;
12208 struct elf32_arm_link_hash_table *htab;
12209
12210 htab = elf32_arm_hash_table (info);
12211 splt = bfd_get_section_by_name (dynobj, ".plt");
12212 BFD_ASSERT (splt != NULL && sdyn != NULL);
12213
12214 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12215 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12216
12217 for (; dyncon < dynconend; dyncon++)
12218 {
12219 Elf_Internal_Dyn dyn;
12220 const char * name;
12221 asection * s;
12222
12223 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12224
12225 switch (dyn.d_tag)
12226 {
12227 unsigned int type;
12228
12229 default:
12230 if (htab->vxworks_p
12231 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12232 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12233 break;
12234
12235 case DT_HASH:
12236 name = ".hash";
12237 goto get_vma_if_bpabi;
12238 case DT_STRTAB:
12239 name = ".dynstr";
12240 goto get_vma_if_bpabi;
12241 case DT_SYMTAB:
12242 name = ".dynsym";
12243 goto get_vma_if_bpabi;
12244 case DT_VERSYM:
12245 name = ".gnu.version";
12246 goto get_vma_if_bpabi;
12247 case DT_VERDEF:
12248 name = ".gnu.version_d";
12249 goto get_vma_if_bpabi;
12250 case DT_VERNEED:
12251 name = ".gnu.version_r";
12252 goto get_vma_if_bpabi;
12253
12254 case DT_PLTGOT:
12255 name = ".got";
12256 goto get_vma;
12257 case DT_JMPREL:
12258 name = RELOC_SECTION (htab, ".plt");
12259 get_vma:
12260 s = bfd_get_section_by_name (output_bfd, name);
12261 BFD_ASSERT (s != NULL);
12262 if (!htab->symbian_p)
12263 dyn.d_un.d_ptr = s->vma;
12264 else
12265 /* In the BPABI, tags in the PT_DYNAMIC section point
12266 at the file offset, not the memory address, for the
12267 convenience of the post linker. */
12268 dyn.d_un.d_ptr = s->filepos;
12269 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12270 break;
12271
12272 get_vma_if_bpabi:
12273 if (htab->symbian_p)
12274 goto get_vma;
12275 break;
12276
12277 case DT_PLTRELSZ:
12278 s = bfd_get_section_by_name (output_bfd,
12279 RELOC_SECTION (htab, ".plt"));
12280 BFD_ASSERT (s != NULL);
12281 dyn.d_un.d_val = s->size;
12282 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12283 break;
12284
12285 case DT_RELSZ:
12286 case DT_RELASZ:
12287 if (!htab->symbian_p)
12288 {
12289 /* My reading of the SVR4 ABI indicates that the
12290 procedure linkage table relocs (DT_JMPREL) should be
12291 included in the overall relocs (DT_REL). This is
12292 what Solaris does. However, UnixWare cannot handle
12293 that case. Therefore, we override the DT_RELSZ entry
12294 here to make it not include the JMPREL relocs. Since
12295 the linker script arranges for .rel(a).plt to follow all
12296 other relocation sections, we don't have to worry
12297 about changing the DT_REL entry. */
12298 s = bfd_get_section_by_name (output_bfd,
12299 RELOC_SECTION (htab, ".plt"));
12300 if (s != NULL)
12301 dyn.d_un.d_val -= s->size;
12302 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12303 break;
12304 }
12305 /* Fall through. */
12306
12307 case DT_REL:
12308 case DT_RELA:
12309 /* In the BPABI, the DT_REL tag must point at the file
12310 offset, not the VMA, of the first relocation
12311 section. So, we use code similar to that in
12312 elflink.c, but do not check for SHF_ALLOC on the
12313 relocation section, since relocation sections are
12314 never allocated under the BPABI. The comments above
12315 about UnixWare notwithstanding, we include all of the
12316 relocations here. */
12317 if (htab->symbian_p)
12318 {
12319 unsigned int i;
12320 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12321 ? SHT_REL : SHT_RELA);
12322 dyn.d_un.d_val = 0;
12323 for (i = 1; i < elf_numsections (output_bfd); i++)
12324 {
12325 Elf_Internal_Shdr *hdr
12326 = elf_elfsections (output_bfd)[i];
12327 if (hdr->sh_type == type)
12328 {
12329 if (dyn.d_tag == DT_RELSZ
12330 || dyn.d_tag == DT_RELASZ)
12331 dyn.d_un.d_val += hdr->sh_size;
12332 else if ((ufile_ptr) hdr->sh_offset
12333 <= dyn.d_un.d_val - 1)
12334 dyn.d_un.d_val = hdr->sh_offset;
12335 }
12336 }
12337 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12338 }
12339 break;
12340
12341 /* Set the bottom bit of DT_INIT/FINI if the
12342 corresponding function is Thumb. */
12343 case DT_INIT:
12344 name = info->init_function;
12345 goto get_sym;
12346 case DT_FINI:
12347 name = info->fini_function;
12348 get_sym:
12349 /* If it wasn't set by elf_bfd_final_link
12350 then there is nothing to adjust. */
12351 if (dyn.d_un.d_val != 0)
12352 {
12353 struct elf_link_hash_entry * eh;
12354
12355 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12356 FALSE, FALSE, TRUE);
12357 if (eh != NULL
12358 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12359 {
12360 dyn.d_un.d_val |= 1;
12361 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12362 }
12363 }
12364 break;
12365 }
12366 }
12367
12368 /* Fill in the first entry in the procedure linkage table. */
12369 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12370 {
12371 const bfd_vma *plt0_entry;
12372 bfd_vma got_address, plt_address, got_displacement;
12373
12374 /* Calculate the addresses of the GOT and PLT. */
12375 got_address = sgot->output_section->vma + sgot->output_offset;
12376 plt_address = splt->output_section->vma + splt->output_offset;
12377
12378 if (htab->vxworks_p)
12379 {
12380 /* The VxWorks GOT is relocated by the dynamic linker.
12381 Therefore, we must emit relocations rather than simply
12382 computing the values now. */
12383 Elf_Internal_Rela rel;
12384
12385 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12386 put_arm_insn (htab, output_bfd, plt0_entry[0],
12387 splt->contents + 0);
12388 put_arm_insn (htab, output_bfd, plt0_entry[1],
12389 splt->contents + 4);
12390 put_arm_insn (htab, output_bfd, plt0_entry[2],
12391 splt->contents + 8);
12392 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12393
12394 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12395 rel.r_offset = plt_address + 12;
12396 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12397 rel.r_addend = 0;
12398 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12399 htab->srelplt2->contents);
12400 }
12401 else
12402 {
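 /* The PLT header's add-to-pc instruction is at offset 8 and pc reads
 8 bytes ahead, so the displacement is taken relative to the start
 of the PLT plus 16. */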
12403 got_displacement = got_address - (plt_address + 16);
12404
12405 plt0_entry = elf32_arm_plt0_entry;
12406 put_arm_insn (htab, output_bfd, plt0_entry[0],
12407 splt->contents + 0);
12408 put_arm_insn (htab, output_bfd, plt0_entry[1],
12409 splt->contents + 4);
12410 put_arm_insn (htab, output_bfd, plt0_entry[2],
12411 splt->contents + 8);
12412 put_arm_insn (htab, output_bfd, plt0_entry[3],
12413 splt->contents + 12);
12414
12415 #ifdef FOUR_WORD_PLT
12416 /* The displacement value goes in the otherwise-unused
12417 last word of the second entry. */
12418 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12419 #else
12420 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12421 #endif
12422 }
12423 }
12424
12425 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12426 really seem like the right value. */
12427 if (splt->output_section->owner == output_bfd)
12428 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12429
12430 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12431 {
12432 /* Correct the .rel(a).plt.unloaded relocations. They will have
12433 incorrect symbol indexes. */
12434 int num_plts;
12435 unsigned char *p;
12436
12437 num_plts = ((htab->splt->size - htab->plt_header_size)
12438 / htab->plt_entry_size);
12439 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12440
12441 for (; num_plts; num_plts--)
12442 {
12443 Elf_Internal_Rela rel;
12444
12445 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12446 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12447 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12448 p += RELOC_SIZE (htab);
12449
12450 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12451 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12452 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12453 p += RELOC_SIZE (htab);
12454 }
12455 }
12456 }
12457
12458 /* Fill in the first three entries in the global offset table. */
12459 if (sgot)
12460 {
12461 if (sgot->size > 0)
12462 {
12463 if (sdyn == NULL)
12464 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12465 else
12466 bfd_put_32 (output_bfd,
12467 sdyn->output_section->vma + sdyn->output_offset,
12468 sgot->contents);
12469 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12470 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12471 }
12472
12473 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12474 }
12475
12476 return TRUE;
12477 }
12478
12479 static void
12480 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12481 {
12482 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12483 struct elf32_arm_link_hash_table *globals;
12484
12485 i_ehdrp = elf_elfheader (abfd);
12486
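 /* Old (pre-EABI) objects keep the ARM-specific OSABI value; EABI
 objects use the generic OSABI of zero. */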
12487 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12488 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12489 else
12490 i_ehdrp->e_ident[EI_OSABI] = 0;
12491 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12492
12493 if (link_info)
12494 {
12495 globals = elf32_arm_hash_table (link_info);
12496 if (globals->byteswap_code)
12497 i_ehdrp->e_flags |= EF_ARM_BE8;
12498 }
12499 }
12500
12501 static enum elf_reloc_type_class
12502 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12503 {
12504 switch ((int) ELF32_R_TYPE (rela->r_info))
12505 {
12506 case R_ARM_RELATIVE:
12507 return reloc_class_relative;
12508 case R_ARM_JUMP_SLOT:
12509 return reloc_class_plt;
12510 case R_ARM_COPY:
12511 return reloc_class_copy;
12512 default:
12513 return reloc_class_normal;
12514 }
12515 }
12516
12517 /* Set additional section flags for an ARM ELF file. Note sections are marked link-once so that duplicate notes can be discarded. */
12518
12519 static bfd_boolean
12520 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12521 {
12522 if (hdr->sh_type == SHT_NOTE)
12523 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12524
12525 return TRUE;
12526 }
12527
12528 static void
12529 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12530 {
12531 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12532 }
12533
12534 /* Return TRUE if this is an unwinding table entry. */
12535
12536 static bfd_boolean
12537 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12538 {
12539 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12540 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12541 }
12542
12543
12544 /* Set the type and flags for an ARM section. We do this based on
12545 the section name, which is a hack, but ought to work. */
12546
12547 static bfd_boolean
12548 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12549 {
12550 const char * name;
12551
12552 name = bfd_get_section_name (abfd, sec);
12553
12554 if (is_arm_elf_unwind_section_name (abfd, name))
12555 {
12556 hdr->sh_type = SHT_ARM_EXIDX;
12557 hdr->sh_flags |= SHF_LINK_ORDER;
12558 }
12559 return TRUE;
12560 }
12561
12562 /* Handle an ARM specific section when reading an object file. This is
12563 called when bfd_section_from_shdr finds a section with an unknown
12564 type. */
12565
12566 static bfd_boolean
12567 elf32_arm_section_from_shdr (bfd *abfd,
12568 Elf_Internal_Shdr * hdr,
12569 const char *name,
12570 int shindex)
12571 {
12572 /* There ought to be a place to keep ELF backend specific flags, but
12573 at the moment there isn't one. We just keep track of the
12574 sections by their name, instead. Fortunately, the ABI gives
12575 names for all the ARM specific sections, so we will probably get
12576 away with this. */
12577 switch (hdr->sh_type)
12578 {
12579 case SHT_ARM_EXIDX:
12580 case SHT_ARM_PREEMPTMAP:
12581 case SHT_ARM_ATTRIBUTES:
12582 break;
12583
12584 default:
12585 return FALSE;
12586 }
12587
12588 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12589 return FALSE;
12590
12591 return TRUE;
12592 }
12593
12594 /* A structure used to record a list of sections, independently
12595 of the next and prev fields in the asection structure. */
12596 typedef struct section_list
12597 {
12598 asection * sec;
12599 struct section_list * next;
12600 struct section_list * prev;
12601 }
12602 section_list;
12603
12604 /* Unfortunately we need to keep a list of sections for which
12605 an _arm_elf_section_data structure has been allocated. This
12606 is because it is possible for functions like elf32_arm_write_section
12607 to be called on a section which has had an elf_data_structure
12608 allocated for it (and so the used_by_bfd field is valid) but
12609 for which the ARM extended version of this structure - the
12610 _arm_elf_section_data structure - has not been allocated. */
12611 static section_list * sections_with_arm_elf_section_data = NULL;
12612
12613 static void
12614 record_section_with_arm_elf_section_data (asection * sec)
12615 {
12616 struct section_list * entry;
12617
12618 entry = (struct section_list *) bfd_malloc (sizeof (* entry));
12619 if (entry == NULL)
12620 return;
12621 entry->sec = sec;
12622 entry->next = sections_with_arm_elf_section_data;
12623 entry->prev = NULL;
12624 if (entry->next != NULL)
12625 entry->next->prev = entry;
12626 sections_with_arm_elf_section_data = entry;
12627 }
12628
12629 static struct section_list *
12630 find_arm_elf_section_entry (asection * sec)
12631 {
12632 struct section_list * entry;
12633 static struct section_list * last_entry = NULL;
12634
12635 /* This is a short cut for the typical case where the sections are added
12636 to the sections_with_arm_elf_section_data list in forward order and
12637 then looked up here in backwards order. This makes a real difference
12638 to the ld-srec/sec64k.exp linker test. */
12639 entry = sections_with_arm_elf_section_data;
12640 if (last_entry != NULL)
12641 {
12642 if (last_entry->sec == sec)
12643 entry = last_entry;
12644 else if (last_entry->next != NULL
12645 && last_entry->next->sec == sec)
12646 entry = last_entry->next;
12647 }
12648
12649 for (; entry; entry = entry->next)
12650 if (entry->sec == sec)
12651 break;
12652
12653 if (entry)
12654 /* Record the entry prior to this one - it is the entry we are most
12655 likely to want to locate next time. Also this way if we have been
12656 called from unrecord_section_with_arm_elf_section_data() we will not
12657 be caching a pointer that is about to be freed. */
12658 last_entry = entry->prev;
12659
12660 return entry;
12661 }
12662
12663 static _arm_elf_section_data *
12664 get_arm_elf_section_data (asection * sec)
12665 {
12666 struct section_list * entry;
12667
12668 entry = find_arm_elf_section_entry (sec);
12669
12670 if (entry)
12671 return elf32_arm_section_data (entry->sec);
12672 else
12673 return NULL;
12674 }
12675
12676 static void
12677 unrecord_section_with_arm_elf_section_data (asection * sec)
12678 {
12679 struct section_list * entry;
12680
12681 entry = find_arm_elf_section_entry (sec);
12682
12683 if (entry)
12684 {
12685 if (entry->prev != NULL)
12686 entry->prev->next = entry->next;
12687 if (entry->next != NULL)
12688 entry->next->prev = entry->prev;
12689 if (entry == sections_with_arm_elf_section_data)
12690 sections_with_arm_elf_section_data = entry->next;
12691 free (entry);
12692 }
12693 }
12694
12695
12696 typedef struct
12697 {
12698 void *finfo;
12699 struct bfd_link_info *info;
12700 asection *sec;
12701 int sec_shndx;
12702 int (*func) (void *, const char *, Elf_Internal_Sym *,
12703 asection *, struct elf_link_hash_entry *);
12704 } output_arch_syminfo;
12705
12706 enum map_symbol_type
12707 {
12708 ARM_MAP_ARM,
12709 ARM_MAP_THUMB,
12710 ARM_MAP_DATA
12711 };
12712
12713
12714 /* Output a single mapping symbol. */
12715
12716 static bfd_boolean
12717 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12718 enum map_symbol_type type,
12719 bfd_vma offset)
12720 {
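 /* Mapping symbol names, indexed by enum map_symbol_type:
 $a marks ARM code, $t Thumb code, $d data. */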
12721 static const char *names[3] = {"$a", "$t", "$d"};
12722 struct elf32_arm_link_hash_table *htab;
12723 Elf_Internal_Sym sym;
12724
12725 htab = elf32_arm_hash_table (osi->info);
12726 sym.st_value = osi->sec->output_section->vma
12727 + osi->sec->output_offset
12728 + offset;
12729 sym.st_size = 0;
12730 sym.st_other = 0;
12731 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12732 sym.st_shndx = osi->sec_shndx;
12733 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12734 }
12735
12736
12737 /* Output mapping symbols for PLT entries associated with H. */
12738
12739 static bfd_boolean
12740 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12741 {
12742 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12743 struct elf32_arm_link_hash_table *htab;
12744 struct elf32_arm_link_hash_entry *eh;
12745 bfd_vma addr;
12746
12747 htab = elf32_arm_hash_table (osi->info);
12748
12749 if (h->root.type == bfd_link_hash_indirect)
12750 return TRUE;
12751
12752 if (h->root.type == bfd_link_hash_warning)
12753 /* When warning symbols are created, they **replace** the "real"
12754 entry in the hash table, thus we never get to see the real
12755 symbol in a hash traversal. So look at it now. */
12756 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12757
12758 if (h->plt.offset == (bfd_vma) -1)
12759 return TRUE;
12760
12761 eh = (struct elf32_arm_link_hash_entry *) h;
12762 addr = h->plt.offset;
12763 if (htab->symbian_p)
12764 {
12765 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12766 return FALSE;
12767 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12768 return FALSE;
12769 }
12770 else if (htab->vxworks_p)
12771 {
12772 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12773 return FALSE;
12774 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12775 return FALSE;
12776 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12777 return FALSE;
12778 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12779 return FALSE;
12780 }
12781 else
12782 {
12783 bfd_signed_vma thumb_refs;
12784
12785 thumb_refs = eh->plt_thumb_refcount;
12786 if (!htab->use_blx)
12787 thumb_refs += eh->plt_maybe_thumb_refcount;
12788
12789 if (thumb_refs > 0)
12790 {
12791 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12792 return FALSE;
12793 }
12794 #ifdef FOUR_WORD_PLT
12795 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12796 return FALSE;
12797 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12798 return FALSE;
12799 #else
12800 /* A three-word PLT with no Thumb thunk contains only ARM code, so
12801 we only need to output a mapping symbol for the first PLT entry and
12802 for entries with Thumb thunks. */
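 /* (An addr of 20 identifies the first entry, which immediately
 follows the 20-byte PLT header.) */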
12803 if (thumb_refs > 0 || addr == 20)
12804 {
12805 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12806 return FALSE;
12807 }
12808 #endif
12809 }
12810
12811 return TRUE;
12812 }
12813
12814 /* Output a single local symbol for a generated stub. */
12815
12816 static bfd_boolean
12817 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12818 bfd_vma offset, bfd_vma size)
12819 {
12820 struct elf32_arm_link_hash_table *htab;
12821 Elf_Internal_Sym sym;
12822
12823 htab = elf32_arm_hash_table (osi->info);
12824 sym.st_value = osi->sec->output_section->vma
12825 + osi->sec->output_offset
12826 + offset;
12827 sym.st_size = size;
12828 sym.st_other = 0;
12829 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12830 sym.st_shndx = osi->sec_shndx;
12831 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12832 }
12833
12834 static bfd_boolean
12835 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12836 void * in_arg)
12837 {
12838 struct elf32_arm_stub_hash_entry *stub_entry;
12839 struct bfd_link_info *info;
12840 struct elf32_arm_link_hash_table *htab;
12841 asection *stub_sec;
12842 bfd_vma addr;
12843 char *stub_name;
12844 output_arch_syminfo *osi;
12845 const insn_sequence *template_sequence;
12846 enum stub_insn_type prev_type;
12847 int size;
12848 int i;
12849 enum map_symbol_type sym_type;
12850
12851 /* Massage our args to the form they really have. */
12852 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12853 osi = (output_arch_syminfo *) in_arg;
12854
12855 info = osi->info;
12856
12857 htab = elf32_arm_hash_table (info);
12858 stub_sec = stub_entry->stub_sec;
12859
12860 /* Ensure this stub is attached to the current section being
12861 processed. */
12862 if (stub_sec != osi->sec)
12863 return TRUE;
12864
12865 addr = (bfd_vma) stub_entry->stub_offset;
12866 stub_name = stub_entry->output_name;
12867
12868 template_sequence = stub_entry->stub_template;
12869 switch (template_sequence[0].type)
12870 {
12871 case ARM_TYPE:
12872 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12873 return FALSE;
12874 break;
12875 case THUMB16_TYPE:
12876 case THUMB32_TYPE:
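 /* Set the low bit of the address so the stub symbol is marked
 as Thumb code. */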
12877 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12878 stub_entry->stub_size))
12879 return FALSE;
12880 break;
12881 default:
12882 BFD_FAIL ();
12883 return 0;
12884 }
12885
12886 prev_type = DATA_TYPE;
12887 size = 0;
12888 for (i = 0; i < stub_entry->stub_template_size; i++)
12889 {
12890 switch (template_sequence[i].type)
12891 {
12892 case ARM_TYPE:
12893 sym_type = ARM_MAP_ARM;
12894 break;
12895
12896 case THUMB16_TYPE:
12897 case THUMB32_TYPE:
12898 sym_type = ARM_MAP_THUMB;
12899 break;
12900
12901 case DATA_TYPE:
12902 sym_type = ARM_MAP_DATA;
12903 break;
12904
12905 default:
12906 BFD_FAIL ();
12907 return FALSE;
12908 }
12909
12910 if (template_sequence[i].type != prev_type)
12911 {
12912 prev_type = template_sequence[i].type;
12913 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12914 return FALSE;
12915 }
12916
12917 switch (template_sequence[i].type)
12918 {
12919 case ARM_TYPE:
12920 case THUMB32_TYPE:
12921 size += 4;
12922 break;
12923
12924 case THUMB16_TYPE:
12925 size += 2;
12926 break;
12927
12928 case DATA_TYPE:
12929 size += 4;
12930 break;
12931
12932 default:
12933 BFD_FAIL ();
12934 return FALSE;
12935 }
12936 }
12937
12938 return TRUE;
12939 }
12940
12941 /* Output mapping symbols for linker-generated sections. */
12942
12943 static bfd_boolean
12944 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12945 struct bfd_link_info *info,
12946 void *finfo,
12947 int (*func) (void *, const char *,
12948 Elf_Internal_Sym *,
12949 asection *,
12950 struct elf_link_hash_entry *))
12951 {
12952 output_arch_syminfo osi;
12953 struct elf32_arm_link_hash_table *htab;
12954 bfd_vma offset;
12955 bfd_size_type size;
12956
12957 htab = elf32_arm_hash_table (info);
12958 check_use_blx (htab);
12959
12960 osi.finfo = finfo;
12961 osi.info = info;
12962 osi.func = func;
12963
12964 /* ARM->Thumb glue. */
12965 if (htab->arm_glue_size > 0)
12966 {
12967 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12968 ARM2THUMB_GLUE_SECTION_NAME);
12969
12970 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12971 (output_bfd, osi.sec->output_section);
12972 if (info->shared || htab->root.is_relocatable_executable
12973 || htab->pic_veneer)
12974 size = ARM2THUMB_PIC_GLUE_SIZE;
12975 else if (htab->use_blx)
12976 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12977 else
12978 size = ARM2THUMB_STATIC_GLUE_SIZE;
12979
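/* Each ARM->Thumb veneer consists of ARM code followed by a trailing
   literal data word used by the veneer, so we place an ARM mapping symbol
   ($a) at the start of each veneer and a data mapping symbol ($d) on its
   final word.  (The exact instruction sequence depends on which glue
   flavour was selected above.)  */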
12980 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12981 {
12982 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12983 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12984 }
12985 }
12986
12987 /* Thumb->ARM glue. */
12988 if (htab->thumb_glue_size > 0)
12989 {
12990 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12991 THUMB2ARM_GLUE_SECTION_NAME);
12992
12993 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12994 (output_bfd, osi.sec->output_section);
12995 size = THUMB2ARM_GLUE_SIZE;
12996
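/* Each Thumb->ARM veneer begins with four bytes of Thumb code that fall
   through into ARM code (typically a BX PC / NOP pair followed by an ARM
   branch), hence the $t mapping symbol at the start of the veneer and the
   $a symbol four bytes in.  */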
12997 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12998 {
12999 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13000 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13001 }
13002 }
13003
13004 /* ARMv4 BX veneers. */
13005 if (htab->bx_glue_size > 0)
13006 {
13007 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13008 ARM_BX_GLUE_SECTION_NAME);
13009
13010 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13011 (output_bfd, osi.sec->output_section);
13012
13013 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13014 }
13015
13016 /* Long-call stubs. */
13017 if (htab->stub_bfd && htab->stub_bfd->sections)
13018 {
13019 asection* stub_sec;
13020
13021 for (stub_sec = htab->stub_bfd->sections;
13022 stub_sec != NULL;
13023 stub_sec = stub_sec->next)
13024 {
13025 /* Ignore non-stub sections. */
13026 if (!strstr (stub_sec->name, STUB_SUFFIX))
13027 continue;
13028
13029 osi.sec = stub_sec;
13030
13031 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13032 (output_bfd, osi.sec->output_section);
13033
13034 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13035 }
13036 }
13037
13038 /* Finally, output mapping symbols for the PLT. */
13039 if (!htab->splt || htab->splt->size == 0)
13040 return TRUE;
13041
13042 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13043 htab->splt->output_section);
13044 osi.sec = htab->splt;
13045 /* Output mapping symbols for the PLT header. SymbianOS does not have a
13046 PLT header. */
13047 if (htab->vxworks_p)
13048 {
13049 /* VxWorks shared libraries have no PLT header. */
13050 if (!info->shared)
13051 {
13052 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13053 return FALSE;
13054 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13055 return FALSE;
13056 }
13057 }
13058 else if (!htab->symbian_p)
13059 {
13060 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13061 return FALSE;
13062 #ifndef FOUR_WORD_PLT
13063 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13064 return FALSE;
13065 #endif
13066 }
13067
13068 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13069 return TRUE;
13070 }
13071
13072 /* Allocate target specific section data. */
13073
13074 static bfd_boolean
13075 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13076 {
13077 if (!sec->used_by_bfd)
13078 {
13079 _arm_elf_section_data *sdata;
13080 bfd_size_type amt = sizeof (*sdata);
13081
13082 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13083 if (sdata == NULL)
13084 return FALSE;
13085 sec->used_by_bfd = sdata;
13086 }
13087
13088 record_section_with_arm_elf_section_data (sec);
13089
13090 return _bfd_elf_new_section_hook (abfd, sec);
13091 }
13092
13093
13094 /* Used to order a list of mapping symbols by address. */
13095
13096 static int
13097 elf32_arm_compare_mapping (const void * a, const void * b)
13098 {
13099 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13100 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13101
13102 if (amap->vma > bmap->vma)
13103 return 1;
13104 else if (amap->vma < bmap->vma)
13105 return -1;
13106 else if (amap->type > bmap->type)
13107 /* Ensure results do not depend on the host qsort for objects with
13108 multiple mapping symbols at the same address by sorting on type
13109 after vma. */
13110 return 1;
13111 else if (amap->type < bmap->type)
13112 return -1;
13113 else
13114 return 0;
13115 }
13116
13117 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13118
13119 static unsigned long
13120 offset_prel31 (unsigned long addr, bfd_vma offset)
13121 {
13122 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13123 }
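/* Illustrative examples (values invented for exposition): offset_prel31
   (0x7ffffffc, 8) yields 0x00000004, since only the low 31 bits take part
   in the addition, while offset_prel31 (0xfffffffc, 8) yields 0x80000004,
   because bit 31 of ADDR is left untouched.  */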
13124
13125 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13126 relocations. */
13127
13128 static void
13129 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13130 {
13131 unsigned long first_word = bfd_get_32 (output_bfd, from);
13132 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13133
13134 /* High bit of first word is supposed to be zero. */
13135 if ((first_word & 0x80000000ul) == 0)
13136 first_word = offset_prel31 (first_word, offset);
13137
13138 /* If the high bit of the second word is clear, and its value is not 0x1
13139 (EXIDX_CANTUNWIND), it is a PREL31 offset to an .ARM.extab entry. */
13140 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13141 second_word = offset_prel31 (second_word, offset);
13142
13143 bfd_put_32 (output_bfd, first_word, to);
13144 bfd_put_32 (output_bfd, second_word, to + 4);
13145 }
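/* Recap of the .ARM.exidx entry layout used above: each entry is two words.
   The first word is a PREL31 offset to the function it describes (bit 31
   clear).  The second word is either the value 1 (EXIDX_CANTUNWIND), an
   inline unwind description (bit 31 set), or a PREL31 offset to an
   .ARM.extab entry (bit 31 clear); only the PREL31 forms need rebasing when
   an entry is moved by OFFSET bytes.  */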
13146
13147 /* Data for make_branch_to_a8_stub(). */
13148
13149 struct a8_branch_to_stub_data {
13150 asection *writing_section;
13151 bfd_byte *contents;
13152 };
13153
13154
13155 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13156 places for a particular section. */
13157
13158 static bfd_boolean
13159 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13160 void *in_arg)
13161 {
13162 struct elf32_arm_stub_hash_entry *stub_entry;
13163 struct a8_branch_to_stub_data *data;
13164 bfd_byte *contents;
13165 unsigned long branch_insn;
13166 bfd_vma veneered_insn_loc, veneer_entry_loc;
13167 bfd_signed_vma branch_offset;
13168 bfd *abfd;
13169 unsigned int index;
13170
13171 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13172 data = (struct a8_branch_to_stub_data *) in_arg;
13173
13174 if (stub_entry->target_section != data->writing_section
13175 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13176 return TRUE;
13177
13178 contents = data->contents;
13179
13180 veneered_insn_loc = stub_entry->target_section->output_section->vma
13181 + stub_entry->target_section->output_offset
13182 + stub_entry->target_value;
13183
13184 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13185 + stub_entry->stub_sec->output_offset
13186 + stub_entry->stub_offset;
13187
13188 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13189 veneered_insn_loc &= ~3u;
13190
13191 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13192
13193 abfd = stub_entry->target_section->owner;
13194 index = stub_entry->target_value;
13195
13196 /* We attempt to avoid this condition by setting stubs_always_after_branch
13197 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13198 This check is just to be on the safe side... */
13199 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13200 {
13201 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13202 "allocated in unsafe location"), abfd);
13203 return FALSE;
13204 }
13205
13206 switch (stub_entry->stub_type)
13207 {
13208 case arm_stub_a8_veneer_b:
13209 case arm_stub_a8_veneer_b_cond:
13210 branch_insn = 0xf0009000;
13211 goto jump24;
13212
13213 case arm_stub_a8_veneer_blx:
13214 branch_insn = 0xf000e800;
13215 goto jump24;
13216
13217 case arm_stub_a8_veneer_bl:
13218 {
13219 unsigned int i1, j1, i2, j2, s;
13220
13221 branch_insn = 0xf000d000;
13222
13223 jump24:
13224 if (branch_offset < -16777216 || branch_offset > 16777214)
13225 {
13226 /* There's not much we can do apart from complain if this
13227 happens. */
13228 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13229 "of range (input file too large)"), abfd);
13230 return FALSE;
13231 }
13232
13233 /* i1 = not(j1 eor s), so:
13234 not i1 = j1 eor s
13235 j1 = (not i1) eor s. */
13236
13237 branch_insn |= (branch_offset >> 1) & 0x7ff;
13238 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13239 i2 = (branch_offset >> 22) & 1;
13240 i1 = (branch_offset >> 23) & 1;
13241 s = (branch_offset >> 24) & 1;
13242 j1 = (!i1) ^ s;
13243 j2 = (!i2) ^ s;
13244 branch_insn |= j2 << 11;
13245 branch_insn |= j1 << 13;
13246 branch_insn |= s << 26;
13247 }
13248 break;
13249
13250 default:
13251 BFD_FAIL ();
13252 return FALSE;
13253 }
13254
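/* The branch is a 32-bit Thumb-2 instruction, stored as two 16-bit
   halfwords with the most significant halfword first.  Roughly, S and imm10
   sit in the upper halfword and J1, J2 and imm11 in the lower one; the CPU
   decodes the offset as SignExtend (S:I1:I2:imm10:imm11:'0'), which is why
   the range check above limits BRANCH_OFFSET to about +/- 16MB.  */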
13255 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13256 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13257
13258 return TRUE;
13259 }
13260
13261 /* Do code byteswapping and apply erratum and EXIDX fixups. Return FALSE
13262 afterwards so that the section is written out as normal. */
13263
13264 static bfd_boolean
13265 elf32_arm_write_section (bfd *output_bfd,
13266 struct bfd_link_info *link_info,
13267 asection *sec,
13268 bfd_byte *contents)
13269 {
13270 unsigned int mapcount, errcount;
13271 _arm_elf_section_data *arm_data;
13272 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13273 elf32_arm_section_map *map;
13274 elf32_vfp11_erratum_list *errnode;
13275 bfd_vma ptr;
13276 bfd_vma end;
13277 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13278 bfd_byte tmp;
13279 unsigned int i;
13280
13281 /* If this section has not been allocated an _arm_elf_section_data
13282 structure then we cannot record anything. */
13283 arm_data = get_arm_elf_section_data (sec);
13284 if (arm_data == NULL)
13285 return FALSE;
13286
13287 mapcount = arm_data->mapcount;
13288 map = arm_data->map;
13289 errcount = arm_data->erratumcount;
13290
13291 if (errcount != 0)
13292 {
13293 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13294
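/* On big-endian output ENDIANFLIP is 3, so XOR-ing it into the byte index
   mirrors the byte order within each aligned 32-bit word; the stores below
   can therefore always be written least-significant byte first.  */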
13295 for (errnode = arm_data->erratumlist; errnode != 0;
13296 errnode = errnode->next)
13297 {
13298 bfd_vma index = errnode->vma - offset;
13299
13300 switch (errnode->type)
13301 {
13302 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13303 {
13304 bfd_vma branch_to_veneer;
13305 /* Original condition code of instruction, plus bit mask for
13306 ARM B instruction. */
13307 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13308 | 0x0a000000;
13309
13310 /* The instruction is before the label. */
13311 index -= 4;
13312
13313 /* Above offset included in -4 below. */
13314 branch_to_veneer = errnode->u.b.veneer->vma
13315 - errnode->vma - 4;
13316
13317 if ((signed) branch_to_veneer < -(1 << 25)
13318 || (signed) branch_to_veneer >= (1 << 25))
13319 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13320 "range"), output_bfd);
13321
13322 insn |= (branch_to_veneer >> 2) & 0xffffff;
13323 contents[endianflip ^ index] = insn & 0xff;
13324 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13325 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13326 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13327 }
13328 break;
13329
13330 case VFP11_ERRATUM_ARM_VENEER:
13331 {
13332 bfd_vma branch_from_veneer;
13333 unsigned int insn;
13334
13335 /* Take size of veneer into account. */
13336 branch_from_veneer = errnode->u.v.branch->vma
13337 - errnode->vma - 12;
13338
13339 if ((signed) branch_from_veneer < -(1 << 25)
13340 || (signed) branch_from_veneer >= (1 << 25))
13341 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13342 "range"), output_bfd);
13343
13344 /* Original instruction. */
13345 insn = errnode->u.v.branch->u.b.vfp_insn;
13346 contents[endianflip ^ index] = insn & 0xff;
13347 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13348 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13349 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13350
13351 /* Branch back to insn after original insn. */
13352 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13353 contents[endianflip ^ (index + 4)] = insn & 0xff;
13354 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13355 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13356 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13357 }
13358 break;
13359
13360 default:
13361 abort ();
13362 }
13363 }
13364 }
13365
13366 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13367 {
13368 arm_unwind_table_edit *edit_node
13369 = arm_data->u.exidx.unwind_edit_list;
13370 /* Now, sec->size is the size of the section we will write. The original
13371 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13372 markers) was sec->rawsize. (This isn't the case if we performed no
13373 edits; rawsize will then be zero and we should use size.) */
13374 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13375 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13376 unsigned int in_index, out_index;
13377 bfd_vma add_to_offsets = 0;
13378
13379 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13380 {
13381 if (edit_node)
13382 {
13383 unsigned int edit_index = edit_node->index;
13384
13385 if (in_index < edit_index && in_index * 8 < input_size)
13386 {
13387 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13388 contents + in_index * 8, add_to_offsets);
13389 out_index++;
13390 in_index++;
13391 }
13392 else if (in_index == edit_index
13393 || (in_index * 8 >= input_size
13394 && edit_index == UINT_MAX))
13395 {
13396 switch (edit_node->type)
13397 {
13398 case DELETE_EXIDX_ENTRY:
13399 in_index++;
13400 add_to_offsets += 8;
13401 break;
13402
13403 case INSERT_EXIDX_CANTUNWIND_AT_END:
13404 {
13405 asection *text_sec = edit_node->linked_section;
13406 bfd_vma text_offset = text_sec->output_section->vma
13407 + text_sec->output_offset
13408 + text_sec->size;
13409 bfd_vma exidx_offset = offset + out_index * 8;
13410 unsigned long prel31_offset;
13411
13412 /* Note: this is meant to be equivalent to an
13413 R_ARM_PREL31 relocation. These synthetic
13414 EXIDX_CANTUNWIND markers are not relocated by the
13415 usual BFD method. */
13416 prel31_offset = (text_offset - exidx_offset)
13417 & 0x7ffffffful;
13418
13419 /* First address we can't unwind. */
13420 bfd_put_32 (output_bfd, prel31_offset,
13421 &edited_contents[out_index * 8]);
13422
13423 /* Code for EXIDX_CANTUNWIND. */
13424 bfd_put_32 (output_bfd, 0x1,
13425 &edited_contents[out_index * 8 + 4]);
13426
13427 out_index++;
13428 add_to_offsets -= 8;
13429 }
13430 break;
13431 }
13432
13433 edit_node = edit_node->next;
13434 }
13435 }
13436 else
13437 {
13438 /* No more edits; copy the remaining entries verbatim. */
13439 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13440 contents + in_index * 8, add_to_offsets);
13441 out_index++;
13442 in_index++;
13443 }
13444 }
13445
13446 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13447 bfd_set_section_contents (output_bfd, sec->output_section,
13448 edited_contents,
13449 (file_ptr) sec->output_offset, sec->size);
13450
13451 return TRUE;
13452 }
13453
13454 /* Fix code to point to Cortex-A8 erratum stubs. */
13455 if (globals->fix_cortex_a8)
13456 {
13457 struct a8_branch_to_stub_data data;
13458
13459 data.writing_section = sec;
13460 data.contents = contents;
13461
13462 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13463 &data);
13464 }
13465
13466 if (mapcount == 0)
13467 return FALSE;
13468
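/* The map entries were derived from the ARM ELF mapping symbols ($a, $t and
   $d): 'a' ranges contain ARM code and are swapped as 32-bit words, 't'
   ranges contain Thumb code and are swapped as 16-bit halfwords, and 'd'
   ranges are data and must be left untouched.  */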
13469 if (globals->byteswap_code)
13470 {
13471 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13472
13473 ptr = map[0].vma;
13474 for (i = 0; i < mapcount; i++)
13475 {
13476 if (i == mapcount - 1)
13477 end = sec->size;
13478 else
13479 end = map[i + 1].vma;
13480
13481 switch (map[i].type)
13482 {
13483 case 'a':
13484 /* Byte swap code words. */
13485 while (ptr + 3 < end)
13486 {
13487 tmp = contents[ptr];
13488 contents[ptr] = contents[ptr + 3];
13489 contents[ptr + 3] = tmp;
13490 tmp = contents[ptr + 1];
13491 contents[ptr + 1] = contents[ptr + 2];
13492 contents[ptr + 2] = tmp;
13493 ptr += 4;
13494 }
13495 break;
13496
13497 case 't':
13498 /* Byte swap code halfwords. */
13499 while (ptr + 1 < end)
13500 {
13501 tmp = contents[ptr];
13502 contents[ptr] = contents[ptr + 1];
13503 contents[ptr + 1] = tmp;
13504 ptr += 2;
13505 }
13506 break;
13507
13508 case 'd':
13509 /* Leave data alone. */
13510 break;
13511 }
13512 ptr = end;
13513 }
13514 }
13515
13516 free (map);
13517 arm_data->mapcount = 0;
13518 arm_data->mapsize = 0;
13519 arm_data->map = NULL;
13520 unrecord_section_with_arm_elf_section_data (sec);
13521
13522 return FALSE;
13523 }
13524
13525 static void
13526 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13527 asection * sec,
13528 void * ignore ATTRIBUTE_UNUSED)
13529 {
13530 unrecord_section_with_arm_elf_section_data (sec);
13531 }
13532
13533 static bfd_boolean
13534 elf32_arm_close_and_cleanup (bfd * abfd)
13535 {
13536 if (abfd->sections)
13537 bfd_map_over_sections (abfd,
13538 unrecord_section_via_map_over_sections,
13539 NULL);
13540
13541 return _bfd_elf_close_and_cleanup (abfd);
13542 }
13543
13544 static bfd_boolean
13545 elf32_arm_bfd_free_cached_info (bfd * abfd)
13546 {
13547 if (abfd->sections)
13548 bfd_map_over_sections (abfd,
13549 unrecord_section_via_map_over_sections,
13550 NULL);
13551
13552 return _bfd_free_cached_info (abfd);
13553 }
13554
13555 /* Display STT_ARM_TFUNC symbols as functions. */
13556
13557 static void
13558 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13559 asymbol *asym)
13560 {
13561 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13562
13563 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13564 elfsym->symbol.flags |= BSF_FUNCTION;
13565 }
13566
13567
13568 /* Mangle thumb function symbols as we read them in. */
13569
13570 static bfd_boolean
13571 elf32_arm_swap_symbol_in (bfd * abfd,
13572 const void *psrc,
13573 const void *pshn,
13574 Elf_Internal_Sym *dst)
13575 {
13576 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13577 return FALSE;
13578
13579 /* New EABI objects mark thumb function symbols by setting the low bit of
13580 the address. Turn these into STT_ARM_TFUNC. */
13581 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13582 && (dst->st_value & 1))
13583 {
13584 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13585 dst->st_value &= ~(bfd_vma) 1;
13586 }
13587 return TRUE;
13588 }
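/* For example, an EABI object describing a Thumb function at address 0x8000
   gives it an STT_FUNC symbol whose st_value is 0x8001; the hook above turns
   that into an STT_ARM_TFUNC symbol with st_value 0x8000, and
   elf32_arm_swap_symbol_out below applies the reverse transformation when
   the symbol is written back out.  */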
13589
13590
13591 /* Mangle thumb function symbols as we write them out. */
13592
13593 static void
13594 elf32_arm_swap_symbol_out (bfd *abfd,
13595 const Elf_Internal_Sym *src,
13596 void *cdst,
13597 void *shndx)
13598 {
13599 Elf_Internal_Sym newsym;
13600
13601 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13602 of the address set, as per the new EABI. We do this unconditionally
13603 because objcopy does not set the elf header flags until after
13604 it writes out the symbol table. */
13605 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13606 {
13607 newsym = *src;
13608 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13609 if (newsym.st_shndx != SHN_UNDEF)
13610 {
13611 /* Do this only for defined symbols. At link time, the static
13612 linker simulates the dynamic linker's work of resolving symbols
13613 and carries over the Thumb-ness of the symbols it finds to the
13614 output symbol table. It is not clear how that happens, but the
13615 Thumb-ness of undefined symbols may well be different at runtime,
13616 and writing '1' for them would be confusing for users and
13617 possibly for the dynamic linker itself.
13618 */
13619 newsym.st_value |= 1;
13620 }
13621
13622 src = &newsym;
13623 }
13624 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13625 }
13626
13627 /* Add the PT_ARM_EXIDX program header. */
13628
13629 static bfd_boolean
13630 elf32_arm_modify_segment_map (bfd *abfd,
13631 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13632 {
13633 struct elf_segment_map *m;
13634 asection *sec;
13635
13636 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13637 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13638 {
13639 /* If there is already a PT_ARM_EXIDX header, then we do not
13640 want to add another one. This situation arises when running
13641 "strip"; the input binary already has the header. */
13642 m = elf_tdata (abfd)->segment_map;
13643 while (m && m->p_type != PT_ARM_EXIDX)
13644 m = m->next;
13645 if (!m)
13646 {
13647 m = (struct elf_segment_map *)
13648 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13649 if (m == NULL)
13650 return FALSE;
13651 m->p_type = PT_ARM_EXIDX;
13652 m->count = 1;
13653 m->sections[0] = sec;
13654
13655 m->next = elf_tdata (abfd)->segment_map;
13656 elf_tdata (abfd)->segment_map = m;
13657 }
13658 }
13659
13660 return TRUE;
13661 }
13662
13663 /* We may add a PT_ARM_EXIDX program header. */
13664
13665 static int
13666 elf32_arm_additional_program_headers (bfd *abfd,
13667 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13668 {
13669 asection *sec;
13670
13671 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13672 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13673 return 1;
13674 else
13675 return 0;
13676 }
13677
13678 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13679
13680 static bfd_boolean
13681 elf32_arm_is_function_type (unsigned int type)
13682 {
13683 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13684 }
13685
13686 /* We use this to override swap_symbol_in and swap_symbol_out. */
13687 const struct elf_size_info elf32_arm_size_info =
13688 {
13689 sizeof (Elf32_External_Ehdr),
13690 sizeof (Elf32_External_Phdr),
13691 sizeof (Elf32_External_Shdr),
13692 sizeof (Elf32_External_Rel),
13693 sizeof (Elf32_External_Rela),
13694 sizeof (Elf32_External_Sym),
13695 sizeof (Elf32_External_Dyn),
13696 sizeof (Elf_External_Note),
13697 4,
13698 1,
13699 32, 2,
13700 ELFCLASS32, EV_CURRENT,
13701 bfd_elf32_write_out_phdrs,
13702 bfd_elf32_write_shdrs_and_ehdr,
13703 bfd_elf32_checksum_contents,
13704 bfd_elf32_write_relocs,
13705 elf32_arm_swap_symbol_in,
13706 elf32_arm_swap_symbol_out,
13707 bfd_elf32_slurp_reloc_table,
13708 bfd_elf32_slurp_symbol_table,
13709 bfd_elf32_swap_dyn_in,
13710 bfd_elf32_swap_dyn_out,
13711 bfd_elf32_swap_reloc_in,
13712 bfd_elf32_swap_reloc_out,
13713 bfd_elf32_swap_reloca_in,
13714 bfd_elf32_swap_reloca_out
13715 };
13716
13717 #define ELF_ARCH bfd_arch_arm
13718 #define ELF_MACHINE_CODE EM_ARM
13719 #ifdef __QNXTARGET__
13720 #define ELF_MAXPAGESIZE 0x1000
13721 #else
13722 #define ELF_MAXPAGESIZE 0x8000
13723 #endif
13724 #define ELF_MINPAGESIZE 0x1000
13725 #define ELF_COMMONPAGESIZE 0x1000
13726
13727 #define bfd_elf32_mkobject elf32_arm_mkobject
13728
13729 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13730 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13731 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13732 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13733 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13734 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13735 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13736 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13737 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13738 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13739 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13740 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13741 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13742 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13743 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13744
13745 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13746 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13747 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13748 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13749 #define elf_backend_check_relocs elf32_arm_check_relocs
13750 #define elf_backend_relocate_section elf32_arm_relocate_section
13751 #define elf_backend_write_section elf32_arm_write_section
13752 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13753 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13754 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13755 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13756 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13757 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13758 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13759 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13760 #define elf_backend_object_p elf32_arm_object_p
13761 #define elf_backend_section_flags elf32_arm_section_flags
13762 #define elf_backend_fake_sections elf32_arm_fake_sections
13763 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13764 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13765 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13766 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13767 #define elf_backend_size_info elf32_arm_size_info
13768 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13769 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13770 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13771 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13772 #define elf_backend_is_function_type elf32_arm_is_function_type
13773
13774 #define elf_backend_can_refcount 1
13775 #define elf_backend_can_gc_sections 1
13776 #define elf_backend_plt_readonly 1
13777 #define elf_backend_want_got_plt 1
13778 #define elf_backend_want_plt_sym 0
13779 #define elf_backend_may_use_rel_p 1
13780 #define elf_backend_may_use_rela_p 0
13781 #define elf_backend_default_use_rela_p 0
13782
13783 #define elf_backend_got_header_size 12
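/* The 12-byte GOT header corresponds to the three conventionally reserved
   GOT words: GOT[0] holds the address of the .dynamic section, while GOT[1]
   and GOT[2] are left for the dynamic linker's own use.  */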
13784
13785 #undef elf_backend_obj_attrs_vendor
13786 #define elf_backend_obj_attrs_vendor "aeabi"
13787 #undef elf_backend_obj_attrs_section
13788 #define elf_backend_obj_attrs_section ".ARM.attributes"
13789 #undef elf_backend_obj_attrs_arg_type
13790 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13791 #undef elf_backend_obj_attrs_section_type
13792 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13793 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13794
13795 #include "elf32-target.h"
13796
13797 /* VxWorks Targets. */
13798
13799 #undef TARGET_LITTLE_SYM
13800 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13801 #undef TARGET_LITTLE_NAME
13802 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13803 #undef TARGET_BIG_SYM
13804 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13805 #undef TARGET_BIG_NAME
13806 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13807
13808 /* Like elf32_arm_link_hash_table_create -- but overrides
13809 appropriately for VxWorks. */
13810
13811 static struct bfd_link_hash_table *
13812 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13813 {
13814 struct bfd_link_hash_table *ret;
13815
13816 ret = elf32_arm_link_hash_table_create (abfd);
13817 if (ret)
13818 {
13819 struct elf32_arm_link_hash_table *htab
13820 = (struct elf32_arm_link_hash_table *) ret;
13821 htab->use_rel = 0;
13822 htab->vxworks_p = 1;
13823 }
13824 return ret;
13825 }
13826
13827 static void
13828 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13829 {
13830 elf32_arm_final_write_processing (abfd, linker);
13831 elf_vxworks_final_write_processing (abfd, linker);
13832 }
13833
13834 #undef elf32_bed
13835 #define elf32_bed elf32_arm_vxworks_bed
13836
13837 #undef bfd_elf32_bfd_link_hash_table_create
13838 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13839 #undef elf_backend_add_symbol_hook
13840 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13841 #undef elf_backend_final_write_processing
13842 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13843 #undef elf_backend_emit_relocs
13844 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13845
13846 #undef elf_backend_may_use_rel_p
13847 #define elf_backend_may_use_rel_p 0
13848 #undef elf_backend_may_use_rela_p
13849 #define elf_backend_may_use_rela_p 1
13850 #undef elf_backend_default_use_rela_p
13851 #define elf_backend_default_use_rela_p 1
13852 #undef elf_backend_want_plt_sym
13853 #define elf_backend_want_plt_sym 1
13854 #undef ELF_MAXPAGESIZE
13855 #define ELF_MAXPAGESIZE 0x1000
13856
13857 #include "elf32-target.h"
13858
13859
13860 /* Merge backend specific data from an object file to the output
13861 object file when linking. */
13862
13863 static bfd_boolean
13864 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
13865 {
13866 flagword out_flags;
13867 flagword in_flags;
13868 bfd_boolean flags_compatible = TRUE;
13869 asection *sec;
13870
13871 /* Check if we have the same endianness. */
13872 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
13873 return FALSE;
13874
13875 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13876 return TRUE;
13877
13878 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
13879 return FALSE;
13880
13881 /* The input BFD must have had its flags initialised. */
13882 /* The following seems bogus to me -- the flags are initialized in
13883 the assembler but I don't think an elf_flags_init field is
13884 written into the object. */
13885 /* BFD_ASSERT (elf_flags_init (ibfd)); */
13886
13887 in_flags = elf_elfheader (ibfd)->e_flags;
13888 out_flags = elf_elfheader (obfd)->e_flags;
13889
13890 /* In theory there is no reason why we couldn't handle this. However
13891 in practice it isn't even close to working and there is no real
13892 reason to want it. */
13893 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
13894 && !(ibfd->flags & DYNAMIC)
13895 && (in_flags & EF_ARM_BE8))
13896 {
13897 _bfd_error_handler (_("error: %B is already in final BE8 format"),
13898 ibfd);
13899 return FALSE;
13900 }
13901
13902 if (!elf_flags_init (obfd))
13903 {
13904 /* If the input is the default architecture and had the default
13905 flags then do not bother setting the flags for the output
13906 architecture, instead allow future merges to do this. If no
13907 future merges ever set these flags then they will retain their
13908 uninitialised values, which, surprise surprise, correspond
13909 to the default values. */
13910 if (bfd_get_arch_info (ibfd)->the_default
13911 && elf_elfheader (ibfd)->e_flags == 0)
13912 return TRUE;
13913
13914 elf_flags_init (obfd) = TRUE;
13915 elf_elfheader (obfd)->e_flags = in_flags;
13916
13917 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
13918 && bfd_get_arch_info (obfd)->the_default)
13919 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
13920
13921 return TRUE;
13922 }
13923
13924 /* Determine what should happen if the input ARM architecture
13925 does not match the output ARM architecture. */
13926 if (! bfd_arm_merge_machines (ibfd, obfd))
13927 return FALSE;
13928
13929 /* Identical flags must be compatible. */
13930 if (in_flags == out_flags)
13931 return TRUE;
13932
13933 /* Check to see if the input BFD actually contains any sections. If
13934 not, its flags may not have been initialised either, but it
13935 cannot actually cause any incompatibility. Do not short-circuit
13936 dynamic objects; their section list may be emptied by
13937 elf_link_add_object_symbols.
13938
13939 Also check to see if there are no code sections in the input.
13940 In this case there is no need to check for code-specific flags.
13941 XXX - do we need to worry about floating-point format compatibility
13942 in data sections ? */
13943 if (!(ibfd->flags & DYNAMIC))
13944 {
13945 bfd_boolean null_input_bfd = TRUE;
13946 bfd_boolean only_data_sections = TRUE;
13947
13948 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
13949 {
13950 /* Ignore synthetic glue sections. */
13951 if (strcmp (sec->name, ".glue_7")
13952 && strcmp (sec->name, ".glue_7t"))
13953 {
13954 if ((bfd_get_section_flags (ibfd, sec)
13955 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
13956 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
13957 only_data_sections = FALSE;
13958
13959 null_input_bfd = FALSE;
13960 break;
13961 }
13962 }
13963
13964 if (null_input_bfd || only_data_sections)
13965 return TRUE;
13966 }
13967
13968 /* Complain about various flag mismatches. */
13969 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
13970 EF_ARM_EABI_VERSION (out_flags)))
13971 {
13972 _bfd_error_handler
13973 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
13974 ibfd, obfd,
13975 (in_flags & EF_ARM_EABIMASK) >> 24,
13976 (out_flags & EF_ARM_EABIMASK) >> 24);
13977 return FALSE;
13978 }
13979
13980 /* Not sure what needs to be checked for EABI versions >= 1. */
13981 /* VxWorks libraries do not use these flags. */
13982 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
13983 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
13984 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
13985 {
13986 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13987 {
13988 _bfd_error_handler
13989 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
13990 ibfd, obfd,
13991 in_flags & EF_ARM_APCS_26 ? 26 : 32,
13992 out_flags & EF_ARM_APCS_26 ? 26 : 32);
13993 flags_compatible = FALSE;
13994 }
13995
13996 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13997 {
13998 if (in_flags & EF_ARM_APCS_FLOAT)
13999 _bfd_error_handler
14000 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14001 ibfd, obfd);
14002 else
14003 _bfd_error_handler
14004 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14005 ibfd, obfd);
14006
14007 flags_compatible = FALSE;
14008 }
14009
14010 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14011 {
14012 if (in_flags & EF_ARM_VFP_FLOAT)
14013 _bfd_error_handler
14014 (_("error: %B uses VFP instructions, whereas %B does not"),
14015 ibfd, obfd);
14016 else
14017 _bfd_error_handler
14018 (_("error: %B uses FPA instructions, whereas %B does not"),
14019 ibfd, obfd);
14020
14021 flags_compatible = FALSE;
14022 }
14023
14024 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14025 {
14026 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14027 _bfd_error_handler
14028 (_("error: %B uses Maverick instructions, whereas %B does not"),
14029 ibfd, obfd);
14030 else
14031 _bfd_error_handler
14032 (_("error: %B does not use Maverick instructions, whereas %B does"),
14033 ibfd, obfd);
14034
14035 flags_compatible = FALSE;
14036 }
14037
14038 #ifdef EF_ARM_SOFT_FLOAT
14039 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14040 {
14041 /* We can allow interworking between code that is VFP format
14042 layout, and uses either soft float or integer regs for
14043 passing floating point arguments and results. We already
14044 know that the APCS_FLOAT flags match; similarly for VFP
14045 flags. */
14046 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14047 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14048 {
14049 if (in_flags & EF_ARM_SOFT_FLOAT)
14050 _bfd_error_handler
14051 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14052 ibfd, obfd);
14053 else
14054 _bfd_error_handler
14055 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14056 ibfd, obfd);
14057
14058 flags_compatible = FALSE;
14059 }
14060 }
14061 #endif
14062
14063 /* Interworking mismatch is only a warning. */
14064 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14065 {
14066 if (in_flags & EF_ARM_INTERWORK)
14067 {
14068 _bfd_error_handler
14069 (_("Warning: %B supports interworking, whereas %B does not"),
14070 ibfd, obfd);
14071 }
14072 else
14073 {
14074 _bfd_error_handler
14075 (_("Warning: %B does not support interworking, whereas %B does"),
14076 ibfd, obfd);
14077 }
14078 }
14079 }
14080
14081 return flags_compatible;
14082 }
14083
14084
14085 /* Symbian OS Targets. */
14086
14087 #undef TARGET_LITTLE_SYM
14088 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14089 #undef TARGET_LITTLE_NAME
14090 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14091 #undef TARGET_BIG_SYM
14092 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14093 #undef TARGET_BIG_NAME
14094 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14095
14096 /* Like elf32_arm_link_hash_table_create -- but overrides
14097 appropriately for Symbian OS. */
14098
14099 static struct bfd_link_hash_table *
14100 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14101 {
14102 struct bfd_link_hash_table *ret;
14103
14104 ret = elf32_arm_link_hash_table_create (abfd);
14105 if (ret)
14106 {
14107 struct elf32_arm_link_hash_table *htab
14108 = (struct elf32_arm_link_hash_table *)ret;
14109 /* There is no PLT header for Symbian OS. */
14110 htab->plt_header_size = 0;
14111 /* The PLT entries are each one instruction and one word. */
14112 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14113 htab->symbian_p = 1;
14114 /* Symbian uses armv5t or above, so use_blx is always true. */
14115 htab->use_blx = 1;
14116 htab->root.is_relocatable_executable = 1;
14117 }
14118 return ret;
14119 }
14120
14121 static const struct bfd_elf_special_section
14122 elf32_arm_symbian_special_sections[] =
14123 {
14124 /* In a BPABI executable, the dynamic linking sections do not go in
14125 the loadable read-only segment. The post-linker may wish to
14126 refer to these sections, but they are not part of the final
14127 program image. */
14128 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14129 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14130 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14131 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14132 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14133 /* These sections do not need to be writable as the SymbianOS
14134 postlinker will arrange things so that no dynamic relocation is
14135 required. */
14136 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14137 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14138 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14139 { NULL, 0, 0, 0, 0 }
14140 };
14141
14142 static void
14143 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14144 struct bfd_link_info *link_info)
14145 {
14146 /* BPABI objects are never loaded directly by an OS kernel; they are
14147 processed by a postlinker first, into an OS-specific format. If
14148 the D_PAGED bit is set on the file, BFD will align segments on
14149 page boundaries, so that an OS can directly map the file. With
14150 BPABI objects, that just results in wasted space. In addition,
14151 because we clear the D_PAGED bit, map_sections_to_segments will
14152 recognize that the program headers should not be mapped into any
14153 loadable segment. */
14154 abfd->flags &= ~D_PAGED;
14155 elf32_arm_begin_write_processing (abfd, link_info);
14156 }
14157
14158 static bfd_boolean
14159 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14160 struct bfd_link_info *info)
14161 {
14162 struct elf_segment_map *m;
14163 asection *dynsec;
14164
14165 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14166 segment. However, because the .dynamic section is not marked
14167 with SEC_LOAD, the generic ELF code will not create such a
14168 segment. */
14169 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14170 if (dynsec)
14171 {
14172 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14173 if (m->p_type == PT_DYNAMIC)
14174 break;
14175
14176 if (m == NULL)
14177 {
14178 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14179 m->next = elf_tdata (abfd)->segment_map;
14180 elf_tdata (abfd)->segment_map = m;
14181 }
14182 }
14183
14184 /* Also call the generic arm routine. */
14185 return elf32_arm_modify_segment_map (abfd, info);
14186 }
14187
14188 /* Return the address of the Ith PLT stub in section PLT, for relocation
14189 REL, or (bfd_vma) -1 if it should not be included. */
14190
14191 static bfd_vma
14192 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14193 const arelent *rel ATTRIBUTE_UNUSED)
14194 {
14195 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
14196 }
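/* With the one-instruction-plus-one-word PLT entry described in
   elf32_arm_symbian_link_hash_table_create above, this evaluates to
   plt->vma + 8 * i.  */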
14197
14198
14199 #undef elf32_bed
14200 #define elf32_bed elf32_arm_symbian_bed
14201
14202 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14203 will process them and then discard them. */
14204 #undef ELF_DYNAMIC_SEC_FLAGS
14205 #define ELF_DYNAMIC_SEC_FLAGS \
14206 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14207
14208 #undef elf_backend_add_symbol_hook
14209 #undef elf_backend_emit_relocs
14210
14211 #undef bfd_elf32_bfd_link_hash_table_create
14212 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14213 #undef elf_backend_special_sections
14214 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14215 #undef elf_backend_begin_write_processing
14216 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14217 #undef elf_backend_final_write_processing
14218 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14219
14220 #undef elf_backend_modify_segment_map
14221 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14222
14223 /* There is no .got section for BPABI objects, and hence no header. */
14224 #undef elf_backend_got_header_size
14225 #define elf_backend_got_header_size 0
14226
14227 /* Similarly, there is no .got.plt section. */
14228 #undef elf_backend_want_got_plt
14229 #define elf_backend_want_got_plt 0
14230
14231 #undef elf_backend_plt_sym_val
14232 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14233
14234 #undef elf_backend_may_use_rel_p
14235 #define elf_backend_may_use_rel_p 1
14236 #undef elf_backend_may_use_rela_p
14237 #define elf_backend_may_use_rela_p 0
14238 #undef elf_backend_default_use_rela_p
14239 #define elf_backend_default_use_rela_p 0
14240 #undef elf_backend_want_plt_sym
14241 #define elf_backend_want_plt_sym 0
14242 #undef ELF_MAXPAGESIZE
14243 #define ELF_MAXPAGESIZE 0x8000
14244
14245 #include "elf32-target.h"