2009-07-10 Doug Kwan <dougkwan@google.com>
[deliverable/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
/* Return the name of the relocation section that accompanies section
   NAME.  HTAB is the bfd's elf32_arm_link_hash_table; its use_rel
   flag selects between REL (".rel") and RELA (".rela") naming.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
36
/* Return the on-disk size of one relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table; its use_rel flag selects the REL versus
   RELA external record format.  */
#define RELOC_SIZE(HTAB)			\
  (!(HTAB)->use_rel				\
   ? sizeof (Elf32_External_Rela)		\
   : sizeof (Elf32_External_Rel))
43
/* Return the function used to read a relocation entry in.  HTAB is
   the bfd's elf32_arm_link_hash_table; its use_rel flag selects the
   REL or RELA swapper.  */
#define SWAP_RELOC_IN(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_in			\
   : bfd_elf32_swap_reloc_in)
50
/* Return the function used to write a relocation entry out.  HTAB is
   the bfd's elf32_arm_link_hash_table; its use_rel flag selects the
   REL or RELA swapper.  */
#define SWAP_RELOC_OUT(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_out			\
   : bfd_elf32_swap_reloc_out)
57
/* Only the REL-flavoured howto-lookup hook is provided;
   elf_info_to_howto is deliberately left as 0 (unused) and the _rel
   variant maps to this backend's elf32_arm_info_to_howto.  */
#define elf_info_to_howto               0
#define elf_info_to_howto_rel           elf32_arm_info_to_howto

/* ABI version and OS/ABI byte recorded in the ELF header for ARM
   objects.  NOTE(review): ELFOSABI_ARM is the legacy ARM OS/ABI
   value defined in elf/arm.h -- confirm before changing.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
63
/* Backend data vector for the VxWorks flavour of this target.
   Presumably initialized from the generic ARM backend elsewhere in
   this file -- only the tentative definition is visible here.  */
static struct elf_backend_data elf32_arm_vxworks_bed;

/* Forward declaration; the definition appears later in the file and
   is not visible in this chunk.  Invoked when SEC's CONTENTS are
   written to OUTPUT_BFD.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
70
/* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
   R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24
   HOWTO in that slot.  */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1605
/* Relocation numbers 112-127 are reserved for private (vendor)
   relocations; 128 (R_ARM_ME_TOO) is obsolete, and 129-255 are
   unallocated in AAELF.

   The extended relocations in the range 249-255, currently unused by
   AAELF, are described by the table below: */
1611
/* Old, deprecated relocation types.  Every entry has zero src/dst
   masks and zero size/bitsize, so applying one never modifies section
   contents; the table exists mainly so these types can be recognized
   and named (e.g. by objdump).  Indexed from R_ARM_RREL32 by
   elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_2[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1693
/* One entry of the table translating BFD's generic relocation codes
   into ARM ELF relocation numbers.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;	/* BFD-internal relocation code.  */
    unsigned char             elf_reloc_val;	/* Corresponding R_ARM_* number.  */
  };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1733 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862 /* Note that for some reason, a spurious space is tacked
1863 onto the end of the args in some (at least one anyway)
1864 implementations, so strip it off if it exists. */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
/* BFD target vectors and printable names for the little- and
   big-endian ARM ELF targets.  */
#define TARGET_LITTLE_SYM               bfd_elf32_littlearm_vec
#define TARGET_LITTLE_NAME              "elf32-littlearm"
#define TARGET_BIG_SYM                  bfd_elf32_bigarm_vec
#define TARGET_BIG_NAME                 "elf32-bigarm"

/* Core-dump note parsers defined above.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo

/* Convenience names for 32-bit ARM and 16-bit Thumb instruction
   words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
   || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
1915
/* FOUR_WORD_PLT selects PLT entries padded to four 32-bit words
   (16 bytes); otherwise three-word (12-byte) entries are used.  The
   templates below are instruction encodings whose immediate fields
   (the #NN parts) are presumably filled in when the PLT is laid out —
   see the PLT-building code elsewhere in this file.  */
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe010,		/* ldr   lr, [pc, #16]  */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #NN	*/
    0xe28cca00,		/* add	 ip, ip, #NN	*/
    0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
    0x00000000,		/* unused		*/
  };

#else

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe004,		/* ldr   lr, [pc, #4]   */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
    0x00000000,		/* &GOT[0] - .          */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #0xNN00000 */
    0xe28cca00,		/* add	 ip, ip, #0xNN000   */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
  };

#endif
1965
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
  {
    0xe52dc008,	        /* str    ip,[sp,#-8]!			*/
    0xe59fc000,         /* ldr    ip,[pc]			*/
    0xe59cf008,         /* ldr    pc,[ip,#8]			*/
    0x00000000,         /* .long  _GLOBAL_OFFSET_TABLE_	        */
  };

/* The format of subsequent entries in a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
  {
    0xe59fc000,         /* ldr    ip,[pc]			*/
    0xe59cf000,         /* ldr    pc,[ip]			*/
    0x00000000,         /* .long  @got				*/
    0xe59fc000,         /* ldr    ip,[pc]			*/
    0xea000000,         /* b      _PLT				*/
    0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* The format of entries in a VxWorks shared library.  r9 holds the
   GOT base in the VxWorks shared-library ABI.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
  {
    0xe59fc000,         /* ldr    ip,[pc]			*/
    0xe79cf009,         /* ldr    pc,[ip,r9]			*/
    0x00000000,         /* .long  @got				*/
    0xe59fc000,         /* ldr    ip,[pc]			*/
    0xe599f008,         /* ldr    pc,[r9,#8]			*/
    0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
  {
    0x4778,		/* bx pc */
    0x46c0		/* nop   */
  };

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
  {
    0xe51ff004,         /* ldr   pc, [pc, #-4] */
    0x00000000,         /* dcd   R_ARM_GLOB_DAT(X) */
  };

/* Reachable ranges of the various branch encodings, including the
   PC-read offset (+8 for ARM, +4 for Thumb).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2020
/* Classification of the entries in a stub template: which encoding the
   DATA field uses, or whether it is literal pool data.  */
enum stub_insn_type
  {
    THUMB16_TYPE = 1,
    THUMB32_TYPE,
    ARM_TYPE,
    DATA_TYPE
  };

#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction (or data word), its
   encoding type, and the relocation (plus addend) to apply to it.  */
typedef struct
{
  bfd_vma data;
  enum stub_insn_type type;
  unsigned int r_type;
  int reloc_addend;
}  insn_sequence;
2046
/* Arm/Thumb -> Arm/Thumb long branch stub.  On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
  {
    ARM_INSN(0xe51ff004),            /* ldr   pc, [pc, #-4] */
    DATA_WORD(0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Arm -> Thumb long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
  {
    ARM_INSN(0xe59fc000),            /* ldr   ip, [pc, #0] */
    ARM_INSN(0xe12fff1c),            /* bx    ip */
    DATA_WORD(0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub.  Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
  {
    THUMB16_INSN(0xb401),             /* push {r0} */
    THUMB16_INSN(0x4802),             /* ldr  r0, [pc, #8] */
    THUMB16_INSN(0x4684),             /* mov  ip, r0 */
    THUMB16_INSN(0xbc01),             /* pop  {r0} */
    THUMB16_INSN(0x4760),             /* bx   ip */
    THUMB16_INSN(0xbf00),             /* nop */
    DATA_WORD(0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> Thumb long branch stub.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
  {
    THUMB16_INSN(0x4778),             /* bx   pc */
    THUMB16_INSN(0x46c0),             /* nop */
    ARM_INSN(0xe59fc000),             /* ldr  ip, [pc, #0] */
    ARM_INSN(0xe12fff1c),             /* bx   ip */
    DATA_WORD(0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM long branch stub.  Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN(0x4778),             /* bx   pc */
    THUMB16_INSN(0x46c0),             /* nop   */
    ARM_INSN(0xe51ff004),             /* ldr   pc, [pc, #-4] */
    DATA_WORD(0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM short branch stub.  Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN(0x4778),             /* bx   pc */
    THUMB16_INSN(0x46c0),             /* nop   */
    ARM_REL_INSN(0xea000000, -8),     /* b    (X-8) */
  };

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  {
    ARM_INSN(0xe59fc000),             /* ldr   r12, [pc] */
    ARM_INSN(0xe08ff00c),             /* add   pc, pc, ip */
    DATA_WORD(0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
  };

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  {
    ARM_INSN(0xe59fc004),             /* ldr   r12, [pc, #4] */
    ARM_INSN(0xe08fc00c),             /* add   ip, pc, ip */
    ARM_INSN(0xe12fff1c),             /* bx    ip */
    DATA_WORD(0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
  };

/* V4T ARM -> Thumb long branch stub, PIC.  The final "bx ip" performs
   the mode switch.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  {
    ARM_INSN(0xe59fc004),             /* ldr   ip, [pc, #4] */
    ARM_INSN(0xe08fc00c),             /* add   ip, pc, ip */
    ARM_INSN(0xe12fff1c),             /* bx    ip */
    DATA_WORD(0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
  };

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  {
    THUMB16_INSN(0x4778),             /* bx   pc */
    THUMB16_INSN(0x46c0),             /* nop  */
    ARM_INSN(0xe59fc000),             /* ldr  ip, [pc, #0] */
    ARM_INSN(0xe08cf00f),             /* add  pc, ip, pc */
    DATA_WORD(0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
  };

/* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
  {
    THUMB16_INSN(0xb401),             /* push {r0} */
    THUMB16_INSN(0x4802),             /* ldr  r0, [pc, #8] */
    THUMB16_INSN(0x46fc),             /* mov  ip, pc */
    THUMB16_INSN(0x4484),             /* add  ip, r0 */
    THUMB16_INSN(0xbc01),             /* pop  {r0} */
    THUMB16_INSN(0x4760),             /* bx   ip */
    DATA_WORD(0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
  };

/* V4T Thumb -> Thumb long branch stub, PIC.  Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
  {
    THUMB16_INSN(0x4778),             /* bx   pc */
    THUMB16_INSN(0x46c0),             /* nop */
    ARM_INSN(0xe59fc004),             /* ldr  ip, [pc, #4] */
    ARM_INSN(0xe08fc00c),             /* add   ip, pc, ip */
    ARM_INSN(0xe12fff1c),             /* bx   ip */
    DATA_WORD(0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
  };

/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
  {
    THUMB16_BCOND_INSN(0xd001),         /* b<cond>.n true.  */
    THUMB32_B_INSN(0xf000b800, -4),     /* b.w insn_after_original_branch.  */
    THUMB32_B_INSN(0xf000b800, -4)      /* true: b.w original_branch_dest.  */
  };

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
  {
    THUMB32_B_INSN(0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
  {
    THUMB32_B_INSN(0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
  {
    ARM_REL_INSN(0xea000000, -8)	/* b original_branch_dest.  */
  };
2203
/* Section name for stubs is the associated section name plus this
   string.  */
#define STUB_SUFFIX ".stub"

/* One entry per long/short branch stub defined above.  Expanding
   DEF_STUB differently yields both the enum below and the parallel
   stub_definitions table.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx)

#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type {
  arm_stub_none,
  DEF_STUBS
};
#undef DEF_STUB

/* A stub template and its length in insn_sequence entries.  */
typedef struct
{
  const insn_sequence* template;
  int template_size;
} stub_def;

/* Table indexed by enum elf32_arm_stub_type; slot 0 is arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] = {
  {NULL, 0},
  DEF_STUBS
};
2245
/* An entry in the stub hash table: one branch veneer, keyed by a
   unique generated name.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Offset to apply to relocation referencing target_value.  */
  bfd_vma target_addend;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Destination symbol type (STT_ARM_TFUNC, ...)  */
  unsigned char st_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2293
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  /* Mapping symbol class: one of 'a' (ARM), 't' (Thumb), 'd' (data).  */
  char type;
}
elf32_arm_section_map;

/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* A linked list of VFP11 erratum records.  Branch records and veneer
   records cross-reference each other through the union.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch jumps to.  */
      struct elf32_vfp11_erratum_list *veneer;
      /* The original VFP instruction being worked around.  */
      unsigned int vfp_insn;
    } b;
    struct
    {
      /* The branch record that targets this veneer.  */
      struct elf32_vfp11_erratum_list *branch;
      /* Veneer number, used to build its unique name.  */
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;

/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2355
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
2387
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix {
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma addend;
  /* The branch instruction being redirected to a veneer.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc {
  bfd_vma from;
  bfd_vma destination;
  unsigned int r_type;
  unsigned char st_type;
  const char *sym_name;
  /* True if a non-A8 stub already redirects this branch.  */
  bfd_boolean non_a8_stub;
};
2415
/* The size of the thread control block.  */
#define TCB_SIZE	8

/* ARM-specific BFD private data, extending the generic ELF tdata.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;
};

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

/* True if BFD is an ELF object with ARM-specific tdata attached.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
/* The ARM linker needs to keep track of the number of relocs that it
   decides to copy in check_relocs for each symbol.  This is so that
   it can discard PC relative relocs if it doesn't need them when
   linking with -Bsymbolic.  We store the information in a field
   extending the regular ELF linker hash table.  */

/* This structure keeps track of the number of relocs we have copied
   for a given symbol.  One node per section that holds such relocs.  */
struct elf32_arm_relocs_copied
  {
    /* Next section.  */
    struct elf32_arm_relocs_copied * next;
    /* A section in dynobj.  */
    asection * section;
    /* Number of relocs copied in this section.  */
    bfd_size_type count;
    /* Number of PC-relative relocs copied in this section.  */
    bfd_size_type pc_count;
  };
2470
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
  {
    struct elf_link_hash_entry root;

    /* Number of PC relative relocs copied for this symbol.  */
    struct elf32_arm_relocs_copied * relocs_copied;

    /* We reference count Thumb references to a PLT entry separately,
       so that we can emit the Thumb trampoline only if needed.  */
    bfd_signed_vma plt_thumb_refcount;

    /* Some references from Thumb code may be eliminated by BL->BLX
       conversion, so record them separately.  */
    bfd_signed_vma plt_maybe_thumb_refcount;

    /* Since PLT entries have variable size if the Thumb prologue is
       used, we need to record the index into .got.plt instead of
       recomputing it from the PLT offset.  */
    bfd_signed_vma plt_got_offset;

/* GOT entry kinds; GOT_TLS_GD and GOT_TLS_IE may be OR'd together.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
    unsigned char tls_type;

    /* The symbol marking the real symbol location for exported thumb
       symbols with Arm stubs.  */
    struct elf_link_hash_entry *export_glue;

   /* A pointer to the most recently used stub hash entry against this
     symbol.  */
    struct elf32_arm_stub_hash_entry *stub_cache;
  };

/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.  */
#define elf32_arm_hash_table(info) \
  ((struct elf32_arm_link_hash_table *) ((info)->hash))

#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
2523
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
  {
    /* The main hash table.  */
    struct elf_link_hash_table root;

    /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
    bfd_size_type thumb_glue_size;

    /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
    bfd_size_type arm_glue_size;

    /* The size in bytes of section containing the ARMv4 BX veneers.  */
    bfd_size_type bx_glue_size;

    /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
       veneer has been populated.  */
    bfd_vma bx_glue_offset[15];

    /* The size in bytes of the section containing glue for VFP11 erratum
       veneers.  */
    bfd_size_type vfp11_erratum_glue_size;

    /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
       holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
       elf32_arm_write_section().  */
    struct a8_erratum_fix *a8_erratum_fixes;
    unsigned int num_a8_erratum_fixes;

    /* An arbitrary input BFD chosen to hold the glue sections.  */
    bfd * bfd_of_glue_owner;

    /* Nonzero to output a BE8 image.  */
    int byteswap_code;

    /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
       Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
    int target1_is_rel;

    /* The relocation to use for R_ARM_TARGET2 relocations.  */
    int target2_reloc;

    /* 0 = Ignore R_ARM_V4BX.
       1 = Convert BX to MOV PC.
       2 = Generate v4 interworking stubs.  */
    int fix_v4bx;

    /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
    int fix_cortex_a8;

    /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
    int use_blx;

    /* What sort of code sequences we should look for which may trigger the
       VFP11 denorm erratum.  */
    bfd_arm_vfp11_fix vfp11_fix;

    /* Global counter for the number of fixes we have emitted.  */
    int num_vfp11_fixes;

    /* Nonzero to force PIC branch veneers.  */
    int pic_veneer;

    /* The number of bytes in the initial entry in the PLT.  */
    bfd_size_type plt_header_size;

    /* The number of bytes in the subsequent PLT entries.  */
    bfd_size_type plt_entry_size;

    /* True if the target system is VxWorks.  */
    int vxworks_p;

    /* True if the target system is Symbian OS.  */
    int symbian_p;

    /* True if the target uses REL relocations.  */
    int use_rel;

    /* Short-cuts to get to dynamic linker sections.  */
    asection *sgot;
    asection *sgotplt;
    asection *srelgot;
    asection *splt;
    asection *srelplt;
    asection *sdynbss;
    asection *srelbss;

    /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
    asection *srelplt2;

    /* Data for R_ARM_TLS_LDM32 relocations.  */
    union
      {
	bfd_signed_vma refcount;
	bfd_vma offset;
      } tls_ldm_got;

    /* Small local sym cache.  */
    struct sym_cache sym_cache;

    /* For convenience in allocate_dynrelocs.  */
    bfd * obfd;

    /* The stub hash table.  */
    struct bfd_hash_table stub_hash_table;

    /* Linker stub bfd.  */
    bfd *stub_bfd;

    /* Linker call-backs.  */
    asection * (*add_stub_section) (const char *, asection *);
    void (*layout_sections_again) (void);

    /* Array to keep track of which stub sections have been created, and
       information on stub grouping.  */
    struct map_stub
    {
      /* This is the section to which stubs in the group will be
	 attached.  */
      asection *link_sec;
      /* The stub section.  */
      asection *stub_sec;
    } *stub_group;

    /* Assorted information used by elf32_arm_size_stubs.  */
    unsigned int bfd_count;
    int top_index;
    asection **input_list;
  };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->target_addend = 0;
2720 eh->orig_insn = 0;
2721 eh->stub_type = arm_stub_none;
2722 eh->stub_size = 0;
2723 eh->stub_template = NULL;
2724 eh->stub_template_size = 0;
2725 eh->h = NULL;
2726 eh->id_sec = NULL;
2727 }
2728
2729 return entry;
2730 }
2731
2732 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2733 shortcuts to them in our hash table. */
2734
2735 static bfd_boolean
2736 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2737 {
2738 struct elf32_arm_link_hash_table *htab;
2739
2740 htab = elf32_arm_hash_table (info);
2741 /* BPABI objects never have a GOT, or associated sections. */
2742 if (htab->symbian_p)
2743 return TRUE;
2744
2745 if (! _bfd_elf_create_got_section (dynobj, info))
2746 return FALSE;
2747
2748 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2749 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2750 if (!htab->sgot || !htab->sgotplt)
2751 abort ();
2752
2753 htab->srelgot = bfd_get_section_by_name (dynobj,
2754 RELOC_SECTION (htab, ".got"));
2755 if (htab->srelgot == NULL)
2756 return FALSE;
2757 return TRUE;
2758 }
2759
2760 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2761 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2762 hash table. */
2763
2764 static bfd_boolean
2765 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2766 {
2767 struct elf32_arm_link_hash_table *htab;
2768
2769 htab = elf32_arm_hash_table (info);
2770 if (!htab->sgot && !create_got_section (dynobj, info))
2771 return FALSE;
2772
2773 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2774 return FALSE;
2775
2776 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2777 htab->srelplt = bfd_get_section_by_name (dynobj,
2778 RELOC_SECTION (htab, ".plt"));
2779 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2780 if (!info->shared)
2781 htab->srelbss = bfd_get_section_by_name (dynobj,
2782 RELOC_SECTION (htab, ".bss"));
2783
2784 if (htab->vxworks_p)
2785 {
2786 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2787 return FALSE;
2788
2789 if (info->shared)
2790 {
2791 htab->plt_header_size = 0;
2792 htab->plt_entry_size
2793 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2794 }
2795 else
2796 {
2797 htab->plt_header_size
2798 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2799 htab->plt_entry_size
2800 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2801 }
2802 }
2803
2804 if (!htab->splt
2805 || !htab->srelplt
2806 || !htab->sdynbss
2807 || (!info->shared && !htab->srelbss))
2808 abort ();
2809
2810 return TRUE;
2811 }
2812
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called when
   IND is made an indirect (or warning) symbol pointing at DIR: merge
   IND's ARM-specific counts into DIR, then let the generic code copy
   the rest.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->relocs_copied != NULL)
    {
      if (edir->relocs_copied != NULL)
	{
	  struct elf32_arm_relocs_copied **pp;
	  struct elf32_arm_relocs_copied *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
	    {
	      struct elf32_arm_relocs_copied *q;

	      for (q = edir->relocs_copied; q != NULL; q = q->next)
		if (q->section == p->section)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink P from IND's list; it has been folded into Q.  */
		    *pp = p->next;
		    break;
		  }
	      /* No matching section in DIR's list: keep P where it is.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append DIR's remaining list to what is left of IND's.  */
	  *pp = edir->relocs_copied;
	}

      /* The merged list now hangs off DIR.  */
      edir->relocs_copied = eind->relocs_copied;
      eind->relocs_copied = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt_thumb_refcount += eind->plt_thumb_refcount;
      eind->plt_thumb_refcount = 0;
      edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
      eind->plt_maybe_thumb_refcount = 0;

      /* Only take IND's TLS classification if DIR has no GOT
	 references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
2873
2874 /* Create an ARM elf linker hash table. */
2875
2876 static struct bfd_link_hash_table *
2877 elf32_arm_link_hash_table_create (bfd *abfd)
2878 {
2879 struct elf32_arm_link_hash_table *ret;
2880 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2881
2882 ret = bfd_malloc (amt);
2883 if (ret == NULL)
2884 return NULL;
2885
2886 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2887 elf32_arm_link_hash_newfunc,
2888 sizeof (struct elf32_arm_link_hash_entry)))
2889 {
2890 free (ret);
2891 return NULL;
2892 }
2893
2894 ret->sgot = NULL;
2895 ret->sgotplt = NULL;
2896 ret->srelgot = NULL;
2897 ret->splt = NULL;
2898 ret->srelplt = NULL;
2899 ret->sdynbss = NULL;
2900 ret->srelbss = NULL;
2901 ret->srelplt2 = NULL;
2902 ret->thumb_glue_size = 0;
2903 ret->arm_glue_size = 0;
2904 ret->bx_glue_size = 0;
2905 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2906 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2907 ret->vfp11_erratum_glue_size = 0;
2908 ret->num_vfp11_fixes = 0;
2909 ret->fix_cortex_a8 = 0;
2910 ret->bfd_of_glue_owner = NULL;
2911 ret->byteswap_code = 0;
2912 ret->target1_is_rel = 0;
2913 ret->target2_reloc = R_ARM_NONE;
2914 #ifdef FOUR_WORD_PLT
2915 ret->plt_header_size = 16;
2916 ret->plt_entry_size = 16;
2917 #else
2918 ret->plt_header_size = 20;
2919 ret->plt_entry_size = 12;
2920 #endif
2921 ret->fix_v4bx = 0;
2922 ret->use_blx = 0;
2923 ret->vxworks_p = 0;
2924 ret->symbian_p = 0;
2925 ret->use_rel = 1;
2926 ret->sym_cache.abfd = NULL;
2927 ret->obfd = abfd;
2928 ret->tls_ldm_got.refcount = 0;
2929 ret->stub_bfd = NULL;
2930 ret->add_stub_section = NULL;
2931 ret->layout_sections_again = NULL;
2932 ret->stub_group = NULL;
2933 ret->bfd_count = 0;
2934 ret->top_index = 0;
2935 ret->input_list = NULL;
2936
2937 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2938 sizeof (struct elf32_arm_stub_hash_entry)))
2939 {
2940 free (ret);
2941 return NULL;
2942 }
2943
2944 return &ret->root.root;
2945 }
2946
2947 /* Free the derived linker hash table. */
2948
2949 static void
2950 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2951 {
2952 struct elf32_arm_link_hash_table *ret
2953 = (struct elf32_arm_link_hash_table *) hash;
2954
2955 bfd_hash_table_free (&ret->stub_hash_table);
2956 _bfd_generic_link_hash_table_free (hash);
2957 }
2958
2959 /* Determine if we're dealing with a Thumb only architecture. */
2960
2961 static bfd_boolean
2962 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2963 {
2964 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2965 Tag_CPU_arch);
2966 int profile;
2967
2968 if (arch != TAG_CPU_ARCH_V7)
2969 return FALSE;
2970
2971 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2972 Tag_CPU_arch_profile);
2973
2974 return profile == 'M';
2975 }
2976
2977 /* Determine if we're dealing with a Thumb-2 object. */
2978
2979 static bfd_boolean
2980 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2981 {
2982 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2983 Tag_CPU_arch);
2984 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2985 }
2986
2987 static bfd_boolean
2988 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2989 {
2990 switch (stub_type)
2991 {
2992 case arm_stub_long_branch_thumb_only:
2993 case arm_stub_long_branch_v4t_thumb_arm:
2994 case arm_stub_short_branch_v4t_thumb_arm:
2995 case arm_stub_long_branch_v4t_thumb_arm_pic:
2996 case arm_stub_long_branch_thumb_only_pic:
2997 return TRUE;
2998 case arm_stub_none:
2999 BFD_FAIL ();
3000 return FALSE;
3001 break;
3002 default:
3003 return FALSE;
3004 }
3005 }
3006
3007 /* Determine the type of stub needed, if any, for a call. */
3008
3009 static enum elf32_arm_stub_type
3010 arm_type_of_stub (struct bfd_link_info *info,
3011 asection *input_sec,
3012 const Elf_Internal_Rela *rel,
3013 unsigned char st_type,
3014 struct elf32_arm_link_hash_entry *hash,
3015 bfd_vma destination,
3016 asection *sym_sec,
3017 bfd *input_bfd,
3018 const char *name)
3019 {
3020 bfd_vma location;
3021 bfd_signed_vma branch_offset;
3022 unsigned int r_type;
3023 struct elf32_arm_link_hash_table * globals;
3024 int thumb2;
3025 int thumb_only;
3026 enum elf32_arm_stub_type stub_type = arm_stub_none;
3027 int use_plt = 0;
3028
3029 /* We don't know the actual type of destination in case it is of
3030 type STT_SECTION: give up. */
3031 if (st_type == STT_SECTION)
3032 return stub_type;
3033
3034 globals = elf32_arm_hash_table (info);
3035
3036 thumb_only = using_thumb_only (globals);
3037
3038 thumb2 = using_thumb2 (globals);
3039
3040 /* Determine where the call point is. */
3041 location = (input_sec->output_offset
3042 + input_sec->output_section->vma
3043 + rel->r_offset);
3044
3045 branch_offset = (bfd_signed_vma)(destination - location);
3046
3047 r_type = ELF32_R_TYPE (rel->r_info);
3048
3049 /* Keep a simpler condition, for the sake of clarity. */
3050 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3051 {
3052 use_plt = 1;
3053 /* Note when dealing with PLT entries: the main PLT stub is in
3054 ARM mode, so if the branch is in Thumb mode, another
3055 Thumb->ARM stub will be inserted later just before the ARM
3056 PLT stub. We don't take this extra distance into account
3057 here, because if a long branch stub is needed, we'll add a
3058 Thumb->Arm one and branch directly to the ARM PLT entry
3059 because it avoids spreading offset corrections in several
3060 places. */
3061 }
3062
3063 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3064 {
3065 /* Handle cases where:
3066 - this call goes too far (different Thumb/Thumb2 max
3067 distance)
3068 - it's a Thumb->Arm call and blx is not available, or it's a
3069 Thumb->Arm branch (not bl). A stub is needed in this case,
3070 but only if this call is not through a PLT entry. Indeed,
3071 PLT stubs handle mode switching already.
3072 */
3073 if ((!thumb2
3074 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3075 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3076 || (thumb2
3077 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3078 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3079 || ((st_type != STT_ARM_TFUNC)
3080 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3081 || (r_type == R_ARM_THM_JUMP24))
3082 && !use_plt))
3083 {
3084 if (st_type == STT_ARM_TFUNC)
3085 {
3086 /* Thumb to thumb. */
3087 if (!thumb_only)
3088 {
3089 stub_type = (info->shared | globals->pic_veneer)
3090 /* PIC stubs. */
3091 ? ((globals->use_blx
3092 && (r_type ==R_ARM_THM_CALL))
3093 /* V5T and above. Stub starts with ARM code, so
3094 we must be able to switch mode before
3095 reaching it, which is only possible for 'bl'
3096 (ie R_ARM_THM_CALL relocation). */
3097 ? arm_stub_long_branch_any_thumb_pic
3098 /* On V4T, use Thumb code only. */
3099 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3100
3101 /* non-PIC stubs. */
3102 : ((globals->use_blx
3103 && (r_type ==R_ARM_THM_CALL))
3104 /* V5T and above. */
3105 ? arm_stub_long_branch_any_any
3106 /* V4T. */
3107 : arm_stub_long_branch_v4t_thumb_thumb);
3108 }
3109 else
3110 {
3111 stub_type = (info->shared | globals->pic_veneer)
3112 /* PIC stub. */
3113 ? arm_stub_long_branch_thumb_only_pic
3114 /* non-PIC stub. */
3115 : arm_stub_long_branch_thumb_only;
3116 }
3117 }
3118 else
3119 {
3120 /* Thumb to arm. */
3121 if (sym_sec != NULL
3122 && sym_sec->owner != NULL
3123 && !INTERWORK_FLAG (sym_sec->owner))
3124 {
3125 (*_bfd_error_handler)
3126 (_("%B(%s): warning: interworking not enabled.\n"
3127 " first occurrence: %B: Thumb call to ARM"),
3128 sym_sec->owner, input_bfd, name);
3129 }
3130
3131 stub_type = (info->shared | globals->pic_veneer)
3132 /* PIC stubs. */
3133 ? ((globals->use_blx
3134 && (r_type ==R_ARM_THM_CALL))
3135 /* V5T and above. */
3136 ? arm_stub_long_branch_any_arm_pic
3137 /* V4T PIC stub. */
3138 : arm_stub_long_branch_v4t_thumb_arm_pic)
3139
3140 /* non-PIC stubs. */
3141 : ((globals->use_blx
3142 && (r_type ==R_ARM_THM_CALL))
3143 /* V5T and above. */
3144 ? arm_stub_long_branch_any_any
3145 /* V4T. */
3146 : arm_stub_long_branch_v4t_thumb_arm);
3147
3148 /* Handle v4t short branches. */
3149 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3150 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3151 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3152 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3153 }
3154 }
3155 }
3156 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3157 {
3158 if (st_type == STT_ARM_TFUNC)
3159 {
3160 /* Arm to thumb. */
3161
3162 if (sym_sec != NULL
3163 && sym_sec->owner != NULL
3164 && !INTERWORK_FLAG (sym_sec->owner))
3165 {
3166 (*_bfd_error_handler)
3167 (_("%B(%s): warning: interworking not enabled.\n"
3168 " first occurrence: %B: ARM call to Thumb"),
3169 sym_sec->owner, input_bfd, name);
3170 }
3171
3172 /* We have an extra 2-bytes reach because of
3173 the mode change (bit 24 (H) of BLX encoding). */
3174 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3175 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3176 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3177 || (r_type == R_ARM_JUMP24)
3178 || (r_type == R_ARM_PLT32))
3179 {
3180 stub_type = (info->shared | globals->pic_veneer)
3181 /* PIC stubs. */
3182 ? ((globals->use_blx)
3183 /* V5T and above. */
3184 ? arm_stub_long_branch_any_thumb_pic
3185 /* V4T stub. */
3186 : arm_stub_long_branch_v4t_arm_thumb_pic)
3187
3188 /* non-PIC stubs. */
3189 : ((globals->use_blx)
3190 /* V5T and above. */
3191 ? arm_stub_long_branch_any_any
3192 /* V4T. */
3193 : arm_stub_long_branch_v4t_arm_thumb);
3194 }
3195 }
3196 else
3197 {
3198 /* Arm to arm. */
3199 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3200 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3201 {
3202 stub_type = (info->shared | globals->pic_veneer)
3203 /* PIC stubs. */
3204 ? arm_stub_long_branch_any_arm_pic
3205 /* non-PIC stubs. */
3206 : arm_stub_long_branch_any_any;
3207 }
3208 }
3209 }
3210
3211 return stub_type;
3212 }
3213
3214 /* Build a name for an entry in the stub hash table. */
3215
3216 static char *
3217 elf32_arm_stub_name (const asection *input_section,
3218 const asection *sym_sec,
3219 const struct elf32_arm_link_hash_entry *hash,
3220 const Elf_Internal_Rela *rel)
3221 {
3222 char *stub_name;
3223 bfd_size_type len;
3224
3225 if (hash)
3226 {
3227 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3228 stub_name = bfd_malloc (len);
3229 if (stub_name != NULL)
3230 sprintf (stub_name, "%08x_%s+%x",
3231 input_section->id & 0xffffffff,
3232 hash->root.root.root.string,
3233 (int) rel->r_addend & 0xffffffff);
3234 }
3235 else
3236 {
3237 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3238 stub_name = bfd_malloc (len);
3239 if (stub_name != NULL)
3240 sprintf (stub_name, "%08x_%x:%x+%x",
3241 input_section->id & 0xffffffff,
3242 sym_sec->id & 0xffffffff,
3243 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3244 (int) rel->r_addend & 0xffffffff);
3245 }
3246
3247 return stub_name;
3248 }
3249
3250 /* Look up an entry in the stub hash. Stub entries are cached because
3251 creating the stub name takes a bit of time. */
3252
3253 static struct elf32_arm_stub_hash_entry *
3254 elf32_arm_get_stub_entry (const asection *input_section,
3255 const asection *sym_sec,
3256 struct elf_link_hash_entry *hash,
3257 const Elf_Internal_Rela *rel,
3258 struct elf32_arm_link_hash_table *htab)
3259 {
3260 struct elf32_arm_stub_hash_entry *stub_entry;
3261 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3262 const asection *id_sec;
3263
3264 if ((input_section->flags & SEC_CODE) == 0)
3265 return NULL;
3266
3267 /* If this input section is part of a group of sections sharing one
3268 stub section, then use the id of the first section in the group.
3269 Stub names need to include a section id, as there may well be
3270 more than one stub used to reach say, printf, and we need to
3271 distinguish between them. */
3272 id_sec = htab->stub_group[input_section->id].link_sec;
3273
3274 if (h != NULL && h->stub_cache != NULL
3275 && h->stub_cache->h == h
3276 && h->stub_cache->id_sec == id_sec)
3277 {
3278 stub_entry = h->stub_cache;
3279 }
3280 else
3281 {
3282 char *stub_name;
3283
3284 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3285 if (stub_name == NULL)
3286 return NULL;
3287
3288 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3289 stub_name, FALSE, FALSE);
3290 if (h != NULL)
3291 h->stub_cache = stub_entry;
3292
3293 free (stub_name);
3294 }
3295
3296 return stub_entry;
3297 }
3298
3299 /* Find or create a stub section. Returns a pointer to the stub section, and
3300 the section to which the stub section will be attached (in *LINK_SEC_P).
3301 LINK_SEC_P may be NULL. */
3302
3303 static asection *
3304 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3305 struct elf32_arm_link_hash_table *htab)
3306 {
3307 asection *link_sec;
3308 asection *stub_sec;
3309
3310 link_sec = htab->stub_group[section->id].link_sec;
3311 stub_sec = htab->stub_group[section->id].stub_sec;
3312 if (stub_sec == NULL)
3313 {
3314 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3315 if (stub_sec == NULL)
3316 {
3317 size_t namelen;
3318 bfd_size_type len;
3319 char *s_name;
3320
3321 namelen = strlen (link_sec->name);
3322 len = namelen + sizeof (STUB_SUFFIX);
3323 s_name = bfd_alloc (htab->stub_bfd, len);
3324 if (s_name == NULL)
3325 return NULL;
3326
3327 memcpy (s_name, link_sec->name, namelen);
3328 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3329 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3330 if (stub_sec == NULL)
3331 return NULL;
3332 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3333 }
3334 htab->stub_group[section->id].stub_sec = stub_sec;
3335 }
3336
3337 if (link_sec_p)
3338 *link_sec_p = link_sec;
3339
3340 return stub_sec;
3341 }
3342
3343 /* Add a new stub entry to the stub hash. Not all fields of the new
3344 stub entry are initialised. */
3345
3346 static struct elf32_arm_stub_hash_entry *
3347 elf32_arm_add_stub (const char *stub_name,
3348 asection *section,
3349 struct elf32_arm_link_hash_table *htab)
3350 {
3351 asection *link_sec;
3352 asection *stub_sec;
3353 struct elf32_arm_stub_hash_entry *stub_entry;
3354
3355 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3356 if (stub_sec == NULL)
3357 return NULL;
3358
3359 /* Enter this entry into the linker stub hash table. */
3360 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3361 TRUE, FALSE);
3362 if (stub_entry == NULL)
3363 {
3364 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3365 section->owner,
3366 stub_name);
3367 return NULL;
3368 }
3369
3370 stub_entry->stub_sec = stub_sec;
3371 stub_entry->stub_offset = 0;
3372 stub_entry->id_sec = link_sec;
3373
3374 return stub_entry;
3375 }
3376
3377 /* Store an Arm insn into an output section not processed by
3378 elf32_arm_write_section. */
3379
3380 static void
3381 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3382 bfd * output_bfd, bfd_vma val, void * ptr)
3383 {
3384 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3385 bfd_putl32 (val, ptr);
3386 else
3387 bfd_putb32 (val, ptr);
3388 }
3389
3390 /* Store a 16-bit Thumb insn into an output section not processed by
3391 elf32_arm_write_section. */
3392
3393 static void
3394 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3395 bfd * output_bfd, bfd_vma val, void * ptr)
3396 {
3397 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3398 bfd_putl16 (val, ptr);
3399 else
3400 bfd_putb16 (val, ptr);
3401 }
3402
3403 static bfd_reloc_status_type elf32_arm_final_link_relocate
3404 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3405 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3406 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3407
/* Build one stub: emit STUB_ENTRY's instruction template into its stub
   section and resolve the template's relocations against the stub
   target.  Called via bfd_hash_traverse; IN_ARG is the
   bfd_link_info.  Returns FALSE only on an unknown template entry.  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 2
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_vma stub_addr;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template;
  int i;
  struct elf32_arm_link_hash_table * globals;
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  globals = elf32_arm_hash_table (info);

  /* NOTE(review): htab duplicates globals and is not referenced again
     below.  */
  htab = elf32_arm_hash_table (info);
  stub_sec = stub_entry->stub_sec;

  /* Make a note of the offset within the stubs for this entry.  */
  stub_entry->stub_offset = stub_sec->size;
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the start of the stub.  */
  /* NOTE(review): stub_addr is computed but not used afterwards.  */
  stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
    + stub_entry->stub_offset;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* First pass: emit the template's instructions/data and record
     (index, offset) for every entry that carries a relocation.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = template[i].data;
	    if (template[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction. See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    put_thumb_insn (globals, stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  /* A 32-bit Thumb insn is written as two 16-bit halves, high
	     half first.  */
	  put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
			  loc + size);
	  put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
			  loc + size + 2);
	  if (template[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  /* Literal-pool word; always relocated.  */
	  bfd_put_32 (stub_bfd, template[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub. Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
  if (stub_entry->st_type == STT_ARM_TFUNC)
    sym_value |= 1;

  /* Assume there is at least one and at most MAXRELOCS entries to relocate
     in each stub.  */
  BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);

  /* Second pass: apply the recorded relocations.  Thumb branch
     relocations need the backend-specific relocator; everything else
     goes through the generic one.  */
  for (i = 0; i < nrelocs; i++)
    if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
	|| template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
	|| template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
	|| template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
      {
	Elf_Internal_Rela rel;
	bfd_boolean unresolved_reloc;
	char *error_message;
	int sym_flags
	  = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
	    ? STT_ARM_TFUNC : 0;
	bfd_vma points_to = sym_value + stub_entry->target_addend;

	rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
	rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
	rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;

	if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	  /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	     template should refer back to the instruction after the original
	     branch.  */
	  points_to = sym_value;

	/* There may be unintended consequences if this is not true.  */
	BFD_ASSERT (stub_entry->h == NULL);

	/* Note: _bfd_final_link_relocate doesn't handle these relocations
	   properly. We should probably use this function unconditionally,
	   rather than only for certain relocations listed in the enclosing
	   conditional, for the sake of consistency.  */
	elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	    (template[stub_reloc_idx[i]].r_type),
	  stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	  points_to, info, stub_entry->target_section, "", sym_flags,
	  (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	  &error_message);
      }
    else
      {
	_bfd_final_link_relocate (elf32_arm_howto_from_type
	    (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
	  stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
	  sym_value + stub_entry->target_addend,
	  template[stub_reloc_idx[i]].reloc_addend);
      }

  return TRUE;
#undef MAXRELOCS
}
3580
3581 /* Calculate the template, template size and instruction size for a stub.
3582 Return value is the instruction size. */
3583
3584 static unsigned int
3585 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3586 const insn_sequence **stub_template,
3587 int *stub_template_size)
3588 {
3589 const insn_sequence *template = NULL;
3590 int template_size = 0, i;
3591 unsigned int size;
3592
3593 template = stub_definitions[stub_type].template;
3594 template_size = stub_definitions[stub_type].template_size;
3595
3596 size = 0;
3597 for (i = 0; i < template_size; i++)
3598 {
3599 switch (template[i].type)
3600 {
3601 case THUMB16_TYPE:
3602 size += 2;
3603 break;
3604
3605 case ARM_TYPE:
3606 case THUMB32_TYPE:
3607 case DATA_TYPE:
3608 size += 4;
3609 break;
3610
3611 default:
3612 BFD_FAIL ();
3613 return FALSE;
3614 }
3615 }
3616
3617 if (stub_template)
3618 *stub_template = template;
3619
3620 if (stub_template_size)
3621 *stub_template_size = template_size;
3622
3623 return size;
3624 }
3625
3626 /* As above, but don't actually build the stub. Just bump offset so
3627 we know stub section sizes. */
3628
3629 static bfd_boolean
3630 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3631 void * in_arg)
3632 {
3633 struct elf32_arm_stub_hash_entry *stub_entry;
3634 struct elf32_arm_link_hash_table *htab;
3635 const insn_sequence *template;
3636 int template_size, size;
3637
3638 /* Massage our args to the form they really have. */
3639 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3640 htab = (struct elf32_arm_link_hash_table *) in_arg;
3641
3642 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3643 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3644
3645 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3646 &template_size);
3647
3648 stub_entry->stub_size = size;
3649 stub_entry->stub_template = template;
3650 stub_entry->stub_template_size = template_size;
3651
3652 size = (size + 7) & ~7;
3653 stub_entry->stub_sec->size += size;
3654
3655 return TRUE;
3656 }
3657
3658 /* External entry points for sizing and building linker stubs. */
3659
3660 /* Set up various things so that we can make a list of input sections
3661 for each output section included in the link. Returns -1 on error,
3662 0 when no stubs will be needed, and 1 on success. */
3663
int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  bfd_size_type amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  /* Stub processing only makes sense with an ELF hash table.  */
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* stub_group is indexed by input section id, so it needs
     top_id + 1 entries; bfd_zmalloc leaves them all NULL.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL marker instead: their lists are
     filled in later by elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
3735
3736 /* The linker repeatedly calls this function for each input section,
3737 in the order that input sections are linked into output sections.
3738 Build lists of input sections to determine groupings between which
3739 we may insert linker stubs. */
3740
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  /* Ignore output sections marked uninteresting (index beyond
     top_index, or entry left as bfd_abs_section_ptr by
     elf32_arm_setup_section_lists).  */
  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      if (*list != bfd_abs_section_ptr)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
3762
3763 /* See whether we can group stub sections together. Grouping stub
3764 sections may result in fewer stubs. More importantly, we need to
3765 put all .init* and .fini* stubs at the end of the .init or
3766 .fini output sections respectively, because glibc splits the
3767 _init and _fini functions into multiple parts. Putting a stub in
3768 the middle of a function is not a good idea. */
3769
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every per-output-section input list built by
     elf32_arm_next_input_section.  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks output sections with no code.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group from HEAD while the span stays within
	     stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section. (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more! Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed once grouping
     is recorded in stub_group[].link_sec.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
3863
3864 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3865 erratum fix. */
3866
3867 static int
3868 a8_reloc_compare (const void *a, const void *b)
3869 {
3870 const struct a8_erratum_reloc *ra = a, *rb = b;
3871
3872 if (ra->from < rb->from)
3873 return -1;
3874 else if (ra->from > rb->from)
3875 return 1;
3876 else
3877 return 0;
3878 }
3879
3880 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3881 const char *, char **);
3882
3883 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3884 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3885 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3886 otherwise. */
3887
3888 static bfd_boolean
3889 cortex_a8_erratum_scan (bfd *input_bfd,
3890 struct bfd_link_info *info,
3891 struct a8_erratum_fix **a8_fixes_p,
3892 unsigned int *num_a8_fixes_p,
3893 unsigned int *a8_fix_table_size_p,
3894 struct a8_erratum_reloc *a8_relocs,
3895 unsigned int num_a8_relocs)
3896 {
3897 asection *section;
3898 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3899 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3900 unsigned int num_a8_fixes = *num_a8_fixes_p;
3901 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3902
3903 for (section = input_bfd->sections;
3904 section != NULL;
3905 section = section->next)
3906 {
3907 bfd_byte *contents = NULL;
3908 struct _arm_elf_section_data *sec_data;
3909 unsigned int span;
3910 bfd_vma base_vma;
3911
3912 if (elf_section_type (section) != SHT_PROGBITS
3913 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3914 || (section->flags & SEC_EXCLUDE) != 0
3915 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3916 || (section->output_section == bfd_abs_section_ptr))
3917 continue;
3918
3919 base_vma = section->output_section->vma + section->output_offset;
3920
3921 if (elf_section_data (section)->this_hdr.contents != NULL)
3922 contents = elf_section_data (section)->this_hdr.contents;
3923 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3924 return TRUE;
3925
3926 sec_data = elf32_arm_section_data (section);
3927
3928 for (span = 0; span < sec_data->mapcount; span++)
3929 {
3930 unsigned int span_start = sec_data->map[span].vma;
3931 unsigned int span_end = (span == sec_data->mapcount - 1)
3932 ? section->size : sec_data->map[span + 1].vma;
3933 unsigned int i;
3934 char span_type = sec_data->map[span].type;
3935 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3936
3937 if (span_type != 't')
3938 continue;
3939
3940 /* Span is entirely within a single 4KB region: skip scanning. */
3941 if (((base_vma + span_start) & ~0xfff)
3942 == ((base_vma + span_end) & ~0xfff))
3943 continue;
3944
3945 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3946
3947 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3948 * The branch target is in the same 4KB region as the
3949 first half of the branch.
3950 * The instruction before the branch is a 32-bit
3951 length non-branch instruction. */
3952 for (i = span_start; i < span_end;)
3953 {
3954 unsigned int insn = bfd_getl16 (&contents[i]);
3955 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3956 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3957
3958 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3959 insn_32bit = TRUE;
3960
3961 if (insn_32bit)
3962 {
3963 /* Load the rest of the insn (in manual-friendly order). */
3964 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3965
3966 /* Encoding T4: B<c>.W. */
3967 is_b = (insn & 0xf800d000) == 0xf0009000;
3968 /* Encoding T1: BL<c>.W. */
3969 is_bl = (insn & 0xf800d000) == 0xf000d000;
3970 /* Encoding T2: BLX<c>.W. */
3971 is_blx = (insn & 0xf800d000) == 0xf000c000;
3972 /* Encoding T3: B<c>.W (not permitted in IT block). */
3973 is_bcc = (insn & 0xf800d000) == 0xf0008000
3974 && (insn & 0x07f00000) != 0x03800000;
3975 }
3976
3977 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3978
3979 if (((base_vma + i) & 0xfff) == 0xffe
3980 && insn_32bit
3981 && is_32bit_branch
3982 && last_was_32bit
3983 && ! last_was_branch)
3984 {
3985 bfd_signed_vma offset;
3986 bfd_boolean force_target_arm = FALSE;
3987 bfd_boolean force_target_thumb = FALSE;
3988 bfd_vma target;
3989 enum elf32_arm_stub_type stub_type = arm_stub_none;
3990 struct a8_erratum_reloc key, *found;
3991
3992 key.from = base_vma + i;
3993 found = bsearch (&key, a8_relocs, num_a8_relocs,
3994 sizeof (struct a8_erratum_reloc),
3995 &a8_reloc_compare);
3996
3997 if (found)
3998 {
3999 char *error_message = NULL;
4000 struct elf_link_hash_entry *entry;
4001
4002 /* We don't care about the error returned from this
4003 function, only if there is glue or not. */
4004 entry = find_thumb_glue (info, found->sym_name,
4005 &error_message);
4006
4007 if (entry)
4008 found->non_a8_stub = TRUE;
4009
4010 if (found->r_type == R_ARM_THM_CALL
4011 && found->st_type != STT_ARM_TFUNC)
4012 force_target_arm = TRUE;
4013 else if (found->r_type == R_ARM_THM_CALL
4014 && found->st_type == STT_ARM_TFUNC)
4015 force_target_thumb = TRUE;
4016 }
4017
4018 /* Check if we have an offending branch instruction. */
4019
4020 if (found && found->non_a8_stub)
4021 /* We've already made a stub for this instruction, e.g.
4022 it's a long branch or a Thumb->ARM stub. Assume that
4023 stub will suffice to work around the A8 erratum (see
4024 setting of always_after_branch above). */
4025 ;
4026 else if (is_bcc)
4027 {
4028 offset = (insn & 0x7ff) << 1;
4029 offset |= (insn & 0x3f0000) >> 4;
4030 offset |= (insn & 0x2000) ? 0x40000 : 0;
4031 offset |= (insn & 0x800) ? 0x80000 : 0;
4032 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4033 if (offset & 0x100000)
4034 offset |= ~ ((bfd_signed_vma) 0xfffff);
4035 stub_type = arm_stub_a8_veneer_b_cond;
4036 }
4037 else if (is_b || is_bl || is_blx)
4038 {
4039 int s = (insn & 0x4000000) != 0;
4040 int j1 = (insn & 0x2000) != 0;
4041 int j2 = (insn & 0x800) != 0;
4042 int i1 = !(j1 ^ s);
4043 int i2 = !(j2 ^ s);
4044
4045 offset = (insn & 0x7ff) << 1;
4046 offset |= (insn & 0x3ff0000) >> 4;
4047 offset |= i2 << 22;
4048 offset |= i1 << 23;
4049 offset |= s << 24;
4050 if (offset & 0x1000000)
4051 offset |= ~ ((bfd_signed_vma) 0xffffff);
4052
4053 if (is_blx)
4054 offset &= ~ ((bfd_signed_vma) 3);
4055
4056 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4057 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4058 }
4059
4060 if (stub_type != arm_stub_none)
4061 {
4062 bfd_vma pc_for_insn = base_vma + i + 4;
4063
4064 /* The original instruction is a BL, but the target is
4065 an ARM instruction. If we were not making a stub,
4066 the BL would have been converted to a BLX. Use the
4067 BLX stub instead in that case. */
4068 if (htab->use_blx && force_target_arm
4069 && stub_type == arm_stub_a8_veneer_bl)
4070 {
4071 stub_type = arm_stub_a8_veneer_blx;
4072 is_blx = TRUE;
4073 is_bl = FALSE;
4074 }
4075 /* Conversely, if the original instruction was
4076 BLX but the target is Thumb mode, use the BL
4077 stub. */
4078 else if (force_target_thumb
4079 && stub_type == arm_stub_a8_veneer_blx)
4080 {
4081 stub_type = arm_stub_a8_veneer_bl;
4082 is_blx = FALSE;
4083 is_bl = TRUE;
4084 }
4085
4086 if (is_blx)
4087 pc_for_insn &= ~ ((bfd_vma) 3);
4088
4089 /* If we found a relocation, use the proper destination,
4090 not the offset in the (unrelocated) instruction.
4091 Note this is always done if we switched the stub type
4092 above. */
4093 if (found)
4094 offset =
4095 (bfd_signed_vma) (found->destination - pc_for_insn);
4096
4097 target = pc_for_insn + offset;
4098
4099 /* The BLX stub is ARM-mode code. Adjust the offset to
4100 take the different PC value (+8 instead of +4) into
4101 account. */
4102 if (stub_type == arm_stub_a8_veneer_blx)
4103 offset += 4;
4104
4105 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4106 {
4107 char *stub_name;
4108
4109 if (num_a8_fixes == a8_fix_table_size)
4110 {
4111 a8_fix_table_size *= 2;
4112 a8_fixes = bfd_realloc (a8_fixes,
4113 sizeof (struct a8_erratum_fix)
4114 * a8_fix_table_size);
4115 }
4116
4117 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4118 if (stub_name != NULL)
4119 sprintf (stub_name, "%x:%x", section->id, i);
4120
4121 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4122 a8_fixes[num_a8_fixes].section = section;
4123 a8_fixes[num_a8_fixes].offset = i;
4124 a8_fixes[num_a8_fixes].addend = offset;
4125 a8_fixes[num_a8_fixes].orig_insn = insn;
4126 a8_fixes[num_a8_fixes].stub_name = stub_name;
4127 a8_fixes[num_a8_fixes].stub_type = stub_type;
4128
4129 num_a8_fixes++;
4130 }
4131 }
4132 }
4133
4134 i += insn_32bit ? 4 : 2;
4135 last_was_32bit = insn_32bit;
4136 last_was_branch = is_32bit_branch;
4137 }
4138 }
4139
4140 if (elf_section_data (section)->this_hdr.contents == NULL)
4141 free (contents);
4142 }
4143
4144 *a8_fixes_p = a8_fixes;
4145 *num_a8_fixes_p = num_a8_fixes;
4146 *a8_fix_table_size_p = a8_fix_table_size;
4147
4148 return FALSE;
4149 }
4150
4151 /* Determine and set the size of the stub section for a final link.
4152
4153 The basic idea here is to examine all the relocations looking for
4154 PC-relative calls to a target that is unreachable with a "bl"
4155 instruction. */
4156
bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *),
		      void (*layout_sections_again) (void))
{
  bfd_size_type stub_group_size;
  bfd_boolean stubs_always_after_branch;
  bfd_boolean stub_changed = 0;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  /* Growable tables used only when the Cortex-A8 erratum workaround is
     enabled; both start with room for 10 entries and double on demand.  */
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab->fix_cortex_a8)
    {
      /* NOTE(review): bfd_zmalloc results are not checked here; a NULL
	 return would only surface later when the tables are indexed.  */
      a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
			      * a8_fix_table_size);
      a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
			       * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  /* A negative group_size selects "stubs after branch"; its magnitude is
     the group size either way.  */
  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* Iterate until no new stubs are created and the Cortex-A8 fix count
     stabilises: each layout pass can move code and thereby require new
     stubs (or new erratum veneers), which in turn changes the layout.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link_next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  enum elf32_arm_stub_type stub_type;
		  struct elf32_arm_stub_hash_entry *stub_entry;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  char *stub_name;
		  const asection *id_sec;
		  unsigned char st_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		      /* Shared error exit for the reloc loop: free the
			 reloc buffer only if it was read fresh (a cached
			 copy is owned by the section's elf data).  */
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		      goto error_ret_free_local;
		    }

		  /* Only look for stubs on branch instructions.  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  hash = NULL;
		  sym_name = NULL;
		  if (r_indx < symtab_hdr->sh_info)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;
		      Elf_Internal_Shdr *hdr;

		      if (local_syms == NULL)
			{
			  /* Prefer the cached symbol table; read it from
			     the bfd only if nothing is cached.  */
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      hdr = elf_elfsections (input_bfd)[sym->st_shndx];
		      sym_sec = hdr->bfd_section;
		      /* Section symbols carry the address in the addend
			 only, so do not add st_value for them.  */
		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      int e_indx;

		      e_indx = r_indx - symtab_hdr->sh_info;
		      hash = ((struct elf32_arm_link_hash_entry *)
			      elf_sym_hashes (input_bfd)[e_indx]);

		      /* Chase indirect and warning links to the real
			 symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals->splt != NULL && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals->splt != NULL && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = ELF_ST_TYPE (hash->root.type);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do { ... } while (0) gives the stub-creation code a
		     'break' that skips to the Cortex-A8 scan below.  */
		  do
		    {
		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, hash,
						    destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* Support for grouping stub sections.  */
		      id_sec = htab->stub_group[section->id].link_sec;

		      /* Get the name of this stub.  */
		      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
						       irela);
		      if (!stub_name)
			goto error_ret_free_internal;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      created_stub = TRUE;

		      stub_entry = arm_stub_hash_lookup
			(&htab->stub_hash_table, stub_name,
			 FALSE, FALSE);
		      if (stub_entry != NULL)
			{
			  /* The proper stub has already been created.  */
			  free (stub_name);
			  break;
			}

		      /* stub_name ownership passes to the new entry on
			 success; free it ourselves on every other path.  */
		      stub_entry = elf32_arm_add_stub (stub_name, section,
						       htab);
		      if (stub_entry == NULL)
			{
			  free (stub_name);
			  goto error_ret_free_internal;
			}

		      stub_entry->target_value = sym_value;
		      stub_entry->target_section = sym_sec;
		      stub_entry->stub_type = stub_type;
		      stub_entry->h = hash;
		      stub_entry->st_type = st_type;

		      if (sym_name == NULL)
			sym_name = "unnamed";
		      /* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the
			 three candidate formats, so sizing the buffer by
			 it accommodates all branches below.  */
		      stub_entry->output_name
			= bfd_alloc (htab->stub_bfd,
				     sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				     + strlen (sym_name));
		      if (stub_entry->output_name == NULL)
			{
			  free (stub_name);
			  goto error_ret_free_internal;
			}

		      /* For historical reasons, use the existing names for
			 ARM-to-Thumb and Thumb-to-ARM stubs.  */
		      if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
			    || (r_type == (unsigned int) R_ARM_THM_JUMP24))
			   && st_type != STT_ARM_TFUNC)
			sprintf (stub_entry->output_name,
				 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
		      else if ( ((r_type == (unsigned int) R_ARM_CALL)
				 || (r_type == (unsigned int) R_ARM_JUMP24))
				&& st_type == STT_ARM_TFUNC)
			sprintf (stub_entry->output_name,
				 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
		      else
			sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
				 sym_name);

		      stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      /* Only branches whose first halfword sits in the last
			 two bytes of a 4K page are candidates.  */
		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      /* NOTE(review): bfd_realloc result unchecked;
				 on failure the old block leaks and the
				 stores below dereference NULL.  */
			      a8_reloc_table_size *= 2;
			      a8_relocs = bfd_realloc (a8_relocs,
				sizeof (struct a8_erratum_reloc)
				* a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].st_type = st_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs) != 0)
		goto error_ret_free_local;
	    }
	}

      if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
	stub_changed = TRUE;

      /* Converged: no new stubs and a stable fix count.  */
      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab);

	    if (stub_sec == NULL)
	      goto error_ret_free_local;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      stub_changed = FALSE;
      prev_num_a8_fixes = num_a8_fixes;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
				     section->owner,
				     stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  stub_entry->stub_offset = 0;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].offset;
	  stub_entry->target_addend = a8_fixes[i].addend;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->st_type = STT_ARM_TFUNC;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return TRUE;

  /* NOTE(review): despite the label name, nothing is freed here:
     local_syms (when freshly read by bfd_elf_get_elf_syms rather than
     cached) and the a8_fixes/a8_relocs tables leak on this path —
     TODO confirm whether this is tolerated as exit-on-error.  */
 error_ret_free_local:
  return FALSE;
}
4661
4662 /* Build all the stubs associated with the current output file. The
4663 stubs are kept in a hash table attached to the main linker hash
4664 table. We also set up the .plt entries for statically linked PIC
4665 functions here. This function is called via arm_elf_finish in the
4666 linker. */
4667
4668 bfd_boolean
4669 elf32_arm_build_stubs (struct bfd_link_info *info)
4670 {
4671 asection *stub_sec;
4672 struct bfd_hash_table *table;
4673 struct elf32_arm_link_hash_table *htab;
4674
4675 htab = elf32_arm_hash_table (info);
4676
4677 for (stub_sec = htab->stub_bfd->sections;
4678 stub_sec != NULL;
4679 stub_sec = stub_sec->next)
4680 {
4681 bfd_size_type size;
4682
4683 /* Ignore non-stub sections. */
4684 if (!strstr (stub_sec->name, STUB_SUFFIX))
4685 continue;
4686
4687 /* Allocate memory to hold the linker stubs. */
4688 size = stub_sec->size;
4689 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4690 if (stub_sec->contents == NULL && size != 0)
4691 return FALSE;
4692 stub_sec->size = 0;
4693 }
4694
4695 /* Build the stubs as directed by the stub hash table. */
4696 table = &htab->stub_hash_table;
4697 bfd_hash_traverse (table, arm_build_one_stub, info);
4698
4699 return TRUE;
4700 }
4701
4702 /* Locate the Thumb encoded calling stub for NAME. */
4703
4704 static struct elf_link_hash_entry *
4705 find_thumb_glue (struct bfd_link_info *link_info,
4706 const char *name,
4707 char **error_message)
4708 {
4709 char *tmp_name;
4710 struct elf_link_hash_entry *hash;
4711 struct elf32_arm_link_hash_table *hash_table;
4712
4713 /* We need a pointer to the armelf specific hash table. */
4714 hash_table = elf32_arm_hash_table (link_info);
4715
4716 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4717 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4718
4719 BFD_ASSERT (tmp_name);
4720
4721 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4722
4723 hash = elf_link_hash_lookup
4724 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4725
4726 if (hash == NULL
4727 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4728 tmp_name, name) == -1)
4729 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4730
4731 free (tmp_name);
4732
4733 return hash;
4734 }
4735
4736 /* Locate the ARM encoded calling stub for NAME. */
4737
4738 static struct elf_link_hash_entry *
4739 find_arm_glue (struct bfd_link_info *link_info,
4740 const char *name,
4741 char **error_message)
4742 {
4743 char *tmp_name;
4744 struct elf_link_hash_entry *myh;
4745 struct elf32_arm_link_hash_table *hash_table;
4746
4747 /* We need a pointer to the elfarm specific hash table. */
4748 hash_table = elf32_arm_hash_table (link_info);
4749
4750 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4751 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4752
4753 BFD_ASSERT (tmp_name);
4754
4755 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4756
4757 myh = elf_link_hash_lookup
4758 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4759
4760 if (myh == NULL
4761 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4762 tmp_name, name) == -1)
4763 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4764
4765 free (tmp_name);
4766
4767 return myh;
4768 }
4769
4770 /* ARM->Thumb glue (static images):
4771
4772 .arm
4773 __func_from_arm:
4774 ldr r12, __func_addr
4775 bx r12
4776 __func_addr:
4777 .word func @ behave as if you saw a ARM_32 reloc.
4778
4779 (v5t static images)
4780 .arm
4781 __func_from_arm:
4782 ldr pc, __func_addr
4783 __func_addr:
4784 .word func @ behave as if you saw a ARM_32 reloc.
4785
4786 (relocatable images)
4787 .arm
4788 __func_from_arm:
4789 ldr r12, __func_offset
4790 add r12, r12, pc
4791 bx r12
4792 __func_offset:
4793 .word func - . */
4794
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	/* ldr r12, [pc]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12  */
/* Placeholder for the .word func address; presumably bit 0 marks it as
   not yet output (cf. the +1 note in record_arm_to_thumb_glue).  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4]  */
/* Placeholder address word, as above.  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12  */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

     .thumb                          .thumb
     .align 2                        .align 2
 __func_from_thumb:             __func_from_thumb:
     bx pc                           push {r6, lr}
     nop                             ldr  r6, __func_addr
     .arm                            mov  lr, pc
     b func                          bx   r6
                                     .arm
                                 ;; back_to_thumb
                                     ldmia r13! {r6, lr}
                                     bx    lr
                                 __func_addr:
                                     .word        func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc (Thumb)  */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;	/* b <offset> (ARM); offset
						   filled in at write time  */

#define VFP11_ERRATUM_VENEER_SIZE 8

/* ARMv4 BX veneer; register fields presumably patched with the veneer's
   register when the glue is written out — confirm at the emission site.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1  */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
4836
4837 #ifndef ELFARM_NABI_C_INCLUDED
4838 static void
4839 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4840 {
4841 asection * s;
4842 bfd_byte * contents;
4843
4844 if (size == 0)
4845 {
4846 /* Do not include empty glue sections in the output. */
4847 if (abfd != NULL)
4848 {
4849 s = bfd_get_section_by_name (abfd, name);
4850 if (s != NULL)
4851 s->flags |= SEC_EXCLUDE;
4852 }
4853 return;
4854 }
4855
4856 BFD_ASSERT (abfd != NULL);
4857
4858 s = bfd_get_section_by_name (abfd, name);
4859 BFD_ASSERT (s != NULL);
4860
4861 contents = bfd_alloc (abfd, size);
4862
4863 BFD_ASSERT (s->size == size);
4864 s->contents = contents;
4865 }
4866
4867 bfd_boolean
4868 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4869 {
4870 struct elf32_arm_link_hash_table * globals;
4871
4872 globals = elf32_arm_hash_table (info);
4873 BFD_ASSERT (globals != NULL);
4874
4875 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4876 globals->arm_glue_size,
4877 ARM2THUMB_GLUE_SECTION_NAME);
4878
4879 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4880 globals->thumb_glue_size,
4881 THUMB2ARM_GLUE_SECTION_NAME);
4882
4883 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4884 globals->vfp11_erratum_glue_size,
4885 VFP11_ERRATUM_VENEER_SECTION_NAME);
4886
4887 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4888 globals->bx_glue_size,
4889 ARM_BX_GLUE_SECTION_NAME);
4890
4891 return TRUE;
4892 }
4893
4894 /* Allocate space and symbols for calling a Thumb function from Arm mode.
4895 returns the symbol identifying the stub. */
4896
4897 static struct elf_link_hash_entry *
4898 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4899 struct elf_link_hash_entry * h)
4900 {
4901 const char * name = h->root.root.string;
4902 asection * s;
4903 char * tmp_name;
4904 struct elf_link_hash_entry * myh;
4905 struct bfd_link_hash_entry * bh;
4906 struct elf32_arm_link_hash_table * globals;
4907 bfd_vma val;
4908 bfd_size_type size;
4909
4910 globals = elf32_arm_hash_table (link_info);
4911
4912 BFD_ASSERT (globals != NULL);
4913 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4914
4915 s = bfd_get_section_by_name
4916 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4917
4918 BFD_ASSERT (s != NULL);
4919
4920 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4921
4922 BFD_ASSERT (tmp_name);
4923
4924 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4925
4926 myh = elf_link_hash_lookup
4927 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4928
4929 if (myh != NULL)
4930 {
4931 /* We've already seen this guy. */
4932 free (tmp_name);
4933 return myh;
4934 }
4935
4936 /* The only trick here is using hash_table->arm_glue_size as the value.
4937 Even though the section isn't allocated yet, this is where we will be
4938 putting it. The +1 on the value marks that the stub has not been
4939 output yet - not that it is a Thumb function. */
4940 bh = NULL;
4941 val = globals->arm_glue_size + 1;
4942 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4943 tmp_name, BSF_GLOBAL, s, val,
4944 NULL, TRUE, FALSE, &bh);
4945
4946 myh = (struct elf_link_hash_entry *) bh;
4947 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4948 myh->forced_local = 1;
4949
4950 free (tmp_name);
4951
4952 if (link_info->shared || globals->root.is_relocatable_executable
4953 || globals->pic_veneer)
4954 size = ARM2THUMB_PIC_GLUE_SIZE;
4955 else if (globals->use_blx)
4956 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4957 else
4958 size = ARM2THUMB_STATIC_GLUE_SIZE;
4959
4960 s->size += size;
4961 globals->arm_glue_size += size;
4962
4963 return myh;
4964 }
4965
4966 /* Allocate space for ARMv4 BX veneers. */
4967
4968 static void
4969 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4970 {
4971 asection * s;
4972 struct elf32_arm_link_hash_table *globals;
4973 char *tmp_name;
4974 struct elf_link_hash_entry *myh;
4975 struct bfd_link_hash_entry *bh;
4976 bfd_vma val;
4977
4978 /* BX PC does not need a veneer. */
4979 if (reg == 15)
4980 return;
4981
4982 globals = elf32_arm_hash_table (link_info);
4983
4984 BFD_ASSERT (globals != NULL);
4985 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4986
4987 /* Check if this veneer has already been allocated. */
4988 if (globals->bx_glue_offset[reg])
4989 return;
4990
4991 s = bfd_get_section_by_name
4992 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4993
4994 BFD_ASSERT (s != NULL);
4995
4996 /* Add symbol for veneer. */
4997 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4998
4999 BFD_ASSERT (tmp_name);
5000
5001 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5002
5003 myh = elf_link_hash_lookup
5004 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5005
5006 BFD_ASSERT (myh == NULL);
5007
5008 bh = NULL;
5009 val = globals->bx_glue_size;
5010 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5011 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5012 NULL, TRUE, FALSE, &bh);
5013
5014 myh = (struct elf_link_hash_entry *) bh;
5015 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5016 myh->forced_local = 1;
5017
5018 s->size += ARM_BX_VENEER_SIZE;
5019 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5020 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5021 }
5022
5023
5024 /* Add an entry to the code/data map for section SEC. */
5025
5026 static void
5027 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5028 {
5029 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5030 unsigned int newidx;
5031
5032 if (sec_data->map == NULL)
5033 {
5034 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5035 sec_data->mapcount = 0;
5036 sec_data->mapsize = 1;
5037 }
5038
5039 newidx = sec_data->mapcount++;
5040
5041 if (sec_data->mapcount > sec_data->mapsize)
5042 {
5043 sec_data->mapsize *= 2;
5044 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5045 * sizeof (elf32_arm_section_map));
5046 }
5047
5048 if (sec_data->map)
5049 {
5050 sec_data->map[newidx].vma = vma;
5051 sec_data->map[newidx].type = type;
5052 }
5053 }
5054
5055
5056 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5057 veneers are handled for now. */
5058
5059 static bfd_vma
5060 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5061 elf32_vfp11_erratum_list *branch,
5062 bfd *branch_bfd,
5063 asection *branch_sec,
5064 unsigned int offset)
5065 {
5066 asection *s;
5067 struct elf32_arm_link_hash_table *hash_table;
5068 char *tmp_name;
5069 struct elf_link_hash_entry *myh;
5070 struct bfd_link_hash_entry *bh;
5071 bfd_vma val;
5072 struct _arm_elf_section_data *sec_data;
5073 int errcount;
5074 elf32_vfp11_erratum_list *newerr;
5075
5076 hash_table = elf32_arm_hash_table (link_info);
5077
5078 BFD_ASSERT (hash_table != NULL);
5079 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5080
5081 s = bfd_get_section_by_name
5082 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5083
5084 sec_data = elf32_arm_section_data (s);
5085
5086 BFD_ASSERT (s != NULL);
5087
5088 tmp_name = bfd_malloc ((bfd_size_type) strlen
5089 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5090
5091 BFD_ASSERT (tmp_name);
5092
5093 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5094 hash_table->num_vfp11_fixes);
5095
5096 myh = elf_link_hash_lookup
5097 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5098
5099 BFD_ASSERT (myh == NULL);
5100
5101 bh = NULL;
5102 val = hash_table->vfp11_erratum_glue_size;
5103 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5104 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5105 NULL, TRUE, FALSE, &bh);
5106
5107 myh = (struct elf_link_hash_entry *) bh;
5108 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5109 myh->forced_local = 1;
5110
5111 /* Link veneer back to calling location. */
5112 errcount = ++(sec_data->erratumcount);
5113 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5114
5115 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5116 newerr->vma = -1;
5117 newerr->u.v.branch = branch;
5118 newerr->u.v.id = hash_table->num_vfp11_fixes;
5119 branch->u.b.veneer = newerr;
5120
5121 newerr->next = sec_data->erratumlist;
5122 sec_data->erratumlist = newerr;
5123
5124 /* A symbol for the return from the veneer. */
5125 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5126 hash_table->num_vfp11_fixes);
5127
5128 myh = elf_link_hash_lookup
5129 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5130
5131 if (myh != NULL)
5132 abort ();
5133
5134 bh = NULL;
5135 val = offset + 4;
5136 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5137 branch_sec, val, NULL, TRUE, FALSE, &bh);
5138
5139 myh = (struct elf_link_hash_entry *) bh;
5140 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5141 myh->forced_local = 1;
5142
5143 free (tmp_name);
5144
5145 /* Generate a mapping symbol for the veneer section, and explicitly add an
5146 entry for that symbol to the code/data map for the section. */
5147 if (hash_table->vfp11_erratum_glue_size == 0)
5148 {
5149 bh = NULL;
5150 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5151 ever requires this erratum fix. */
5152 _bfd_generic_link_add_one_symbol (link_info,
5153 hash_table->bfd_of_glue_owner, "$a",
5154 BSF_LOCAL, s, 0, NULL,
5155 TRUE, FALSE, &bh);
5156
5157 myh = (struct elf_link_hash_entry *) bh;
5158 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5159 myh->forced_local = 1;
5160
5161 /* The elf32_arm_init_maps function only cares about symbols from input
5162 BFDs. We must make a note of this generated mapping symbol
5163 ourselves so that code byteswapping works properly in
5164 elf32_arm_write_section. */
5165 elf32_arm_section_map_add (s, 'a', 0);
5166 }
5167
5168 s->size += VFP11_ERRATUM_VENEER_SIZE;
5169 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5170 hash_table->num_vfp11_fixes++;
5171
5172 /* The offset of the veneer. */
5173 return val;
5174 }
5175
/* Section flags for the linker-created glue sections: allocated,
   loadable, read-only code marked as created by the linker itself.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
5179
5180 /* Create a fake section for use by the ARM backend of the linker. */
5181
5182 static bfd_boolean
5183 arm_make_glue_section (bfd * abfd, const char * name)
5184 {
5185 asection * sec;
5186
5187 sec = bfd_get_section_by_name (abfd, name);
5188 if (sec != NULL)
5189 /* Already made. */
5190 return TRUE;
5191
5192 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5193
5194 if (sec == NULL
5195 || !bfd_set_section_alignment (abfd, sec, 2))
5196 return FALSE;
5197
5198 /* Set the gc mark to prevent the section from being removed by garbage
5199 collection, despite the fact that no relocs refer to this section. */
5200 sec->gc_mark = 1;
5201
5202 return TRUE;
5203 }
5204
5205 /* Add the glue sections to ABFD. This function is called from the
5206 linker scripts in ld/emultempl/{armelf}.em. */
5207
5208 bfd_boolean
5209 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5210 struct bfd_link_info *info)
5211 {
5212 /* If we are only performing a partial
5213 link do not bother adding the glue. */
5214 if (info->relocatable)
5215 return TRUE;
5216
5217 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5218 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5219 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5220 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5221 }
5222
5223 /* Select a BFD to be used to hold the sections used by the glue code.
5224 This function is called from the linker scripts in ld/emultempl/
5225 {armelf/pe}.em. */
5226
5227 bfd_boolean
5228 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5229 {
5230 struct elf32_arm_link_hash_table *globals;
5231
5232 /* If we are only performing a partial link
5233 do not bother getting a bfd to hold the glue. */
5234 if (info->relocatable)
5235 return TRUE;
5236
5237 /* Make sure we don't attach the glue sections to a dynamic object. */
5238 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5239
5240 globals = elf32_arm_hash_table (info);
5241
5242 BFD_ASSERT (globals != NULL);
5243
5244 if (globals->bfd_of_glue_owner != NULL)
5245 return TRUE;
5246
5247 /* Save the bfd for later use. */
5248 globals->bfd_of_glue_owner = abfd;
5249
5250 return TRUE;
5251 }
5252
5253 static void
5254 check_use_blx (struct elf32_arm_link_hash_table *globals)
5255 {
5256 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5257 Tag_CPU_arch) > 2)
5258 globals->use_blx = 1;
5259 }
5260
/* Scan every section of ABFD's relocations before section sizes are
   fixed and record any glue the input requires: an ARM->Thumb stub for
   each R_ARM_PC24 call to a Thumb function (STT_ARM_TFUNC), and a BX
   veneer for each R_ARM_V4BX when fix_v4bx >= 2.  Returns FALSE on
   error (unreadable relocs or section contents).  */

bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (link_info->relocatable)
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);

  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX relocs are only interesting when we are
	     replacing BX instructions (fix_v4bx >= 2).  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  /* For a BX, the register operand is in the low nibble of the
	     instruction; reserve a veneer for that register.  */
	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Free section contents and relocs unless they are the cached
	 copies owned by the section data.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
5421 #endif
5422
5423
5424 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5425
5426 void
5427 bfd_elf32_arm_init_maps (bfd *abfd)
5428 {
5429 Elf_Internal_Sym *isymbuf;
5430 Elf_Internal_Shdr *hdr;
5431 unsigned int i, localsyms;
5432
5433 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5434 if (! is_arm_elf (abfd))
5435 return;
5436
5437 if ((abfd->flags & DYNAMIC) != 0)
5438 return;
5439
5440 hdr = & elf_symtab_hdr (abfd);
5441 localsyms = hdr->sh_info;
5442
5443 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5444 should contain the number of local symbols, which should come before any
5445 global symbols. Mapping symbols are always local. */
5446 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5447 NULL);
5448
5449 /* No internal symbols read? Skip this BFD. */
5450 if (isymbuf == NULL)
5451 return;
5452
5453 for (i = 0; i < localsyms; i++)
5454 {
5455 Elf_Internal_Sym *isym = &isymbuf[i];
5456 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5457 const char *name;
5458
5459 if (sec != NULL
5460 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5461 {
5462 name = bfd_elf_string_from_elf_section (abfd,
5463 hdr->sh_link, isym->st_name);
5464
5465 if (bfd_is_arm_special_symbol_name (name,
5466 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5467 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5468 }
5469 }
5470 }
5471
5472
5473 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5474 say what they wanted. */
5475
5476 void
5477 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5478 {
5479 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5480 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5481
5482 if (globals->fix_cortex_a8 == -1)
5483 {
5484 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5485 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5486 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5487 || out_attr[Tag_CPU_arch_profile].i == 0))
5488 globals->fix_cortex_a8 = 1;
5489 else
5490 globals->fix_cortex_a8 = 0;
5491 }
5492 }
5493
5494
5495 void
5496 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5497 {
5498 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5499 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5500
5501 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5502 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5503 {
5504 switch (globals->vfp11_fix)
5505 {
5506 case BFD_ARM_VFP11_FIX_DEFAULT:
5507 case BFD_ARM_VFP11_FIX_NONE:
5508 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5509 break;
5510
5511 default:
5512 /* Give a warning, but do as the user requests anyway. */
5513 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5514 "workaround is not necessary for target architecture"), obfd);
5515 }
5516 }
5517 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5518 /* For earlier architectures, we might need the workaround, but do not
5519 enable it by default. If users is running with broken hardware, they
5520 must enable the erratum fix explicitly. */
5521 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5522 }
5523
5524
/* Classification of a VFP11 instruction by execution pipeline, as
   returned by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply/accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
5532
5533 /* Return a VFP register number. This is encoded as RX:X for single-precision
5534 registers, or X:RX for double-precision registers, where RX is the group of
5535 four bits in the instruction encoding and X is the single extension bit.
5536 RX and X fields are specified using their lowest (starting) bit. The return
5537 value is:
5538
5539 0...31: single-precision registers s0...s31
5540 32...63: double-precision registers d0...d31.
5541
5542 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5543 encounter VFP3 instructions, so we allow the full range for DP registers. */
5544
5545 static unsigned int
5546 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5547 unsigned int x)
5548 {
5549 if (is_double)
5550 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5551 else
5552 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5553 }
5554
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Single-precision registers set one bit; a
   double-precision register d0-d15 sets the two bits of its overlapping
   single-precision halves.  d16-d31 (REG >= 48) are ignored.

   Use unsigned constants for the shifts: the previous `1 << reg' and
   `3 << ((reg - 32) * 2)' shifted into the sign bit of a signed int for
   s31/d15, which is undefined behaviour in C.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
5566
5567 /* Return TRUE if WMASK overwrites anything in REGS. */
5568
5569 static bfd_boolean
5570 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5571 {
5572 int i;
5573
5574 for (i = 0; i < numregs; i++)
5575 {
5576 unsigned int reg = regs[i];
5577
5578 if (reg < 32 && (wmask & (1 << reg)) != 0)
5579 return TRUE;
5580
5581 reg -= 32;
5582
5583 if (reg >= 16)
5584 continue;
5585
5586 if ((wmask & (3 << (reg * 2))) != 0)
5587 return TRUE;
5588 }
5589
5590 return FALSE;
5591 }
5592
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   INSN is the 32-bit ARM instruction word.  On return, *DESTMASK holds
   the set of registers written (per bfd_arm_vfp11_write_mask), and for
   FMAC/DS instructions regs[0..*NUMREGS-1] hold the input register
   numbers.  The return value classifies the instruction's pipeline, or
   VFP11_BAD if it is not a recognised VFP11 instruction.  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select double-precision operands.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one selector value.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  pipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* The accumulate forms read Fd as well as Fn and Fm.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  pipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  pipe = VFP11_DS;
	  vfp_binop:
	  /* Plain binary ops read only Fn and Fm.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		pipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		pipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  pipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers, which
	 writes Fm (or the Fm/Fm+1 pair for single precision).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      pipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Load-multiple writes OFFSET consecutive registers
	       starting at Fd (the immediate counts words, so halve it
	       for double precision).  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      pipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      pipe = VFP11_LS;
    }

  return pipe;
}
5784
5785
5786 static int elf32_arm_compare_mapping (const void * a, const void * b);
5787
5788
/* Look for potentially-troublesome code sequences which might trigger the
   VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
   (available from ARM) for details of the erratum.  A short version is
   described in ld.texinfo.  Returns FALSE only when section contents
   cannot be read; every other early exit returns TRUE.  */

bfd_boolean
bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  int state = 0;
  int regs[3], numregs = 0;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);

  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
     The states transition as follows:

     0 -> 1 (vector) or 0 -> 2 (scalar)
	 A VFP FMAC-pipeline instruction has been seen.  Fill
	 regs[0]..regs[numregs-1] with its input operands.  Remember this
	 instruction in 'first_fmac'.

     1 -> 2
	 Any instruction, except for a VFP instruction which overwrites
	 regs[*].

     1 -> 3 [ -> 0 ]  or
     2 -> 3 [ -> 0 ]
	 A VFP instruction has been seen which overwrites any of regs[*].
	 We must make a veneer!  Reset state to 0 before examining next
	 instruction.

     2 -> 0
	 If we fail to match anything in state 2, reset to state 0 and reset
	 the instruction pointer to the instruction after 'first_fmac'.

     If the VFP11 vector mode is in use, there must be at least two unrelated
     instructions between anti-dependent VFP11 instructions to properly avoid
     triggering the erratum, hence the use of the extra state 1.  */

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (link_info->relocatable)
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  /* We should have chosen a fix type by the time we get here.  */
  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);

  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no spans of known ARM code to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort mapping symbols by address so consecutive entries delimit
	 spans of ARM code, Thumb code or data.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
				  ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;

	  /* FIXME: Only ARM mode is supported at present.  We may need to
	     support Thumb-2 mode also at some point.  */
	  if (span_type != 'a')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int next_i = i + 4;
	      /* Fetch one 4-byte ARM instruction, honouring the bfd's
		 endianness.  */
	      unsigned int insn = bfd_big_endian (abfd)
		? (contents[i] << 24)
		  | (contents[i + 1] << 16)
		  | (contents[i + 2] << 8)
		  | contents[i + 3]
		: (contents[i + 3] << 24)
		  | (contents[i + 2] << 16)
		  | (contents[i + 1] << 8)
		  | contents[i];
	      unsigned int writemask = 0;
	      enum bfd_arm_vfp11_pipe pipe;

	      switch (state)
		{
		case 0:
		  pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
						    &numregs);
		  /* I'm assuming the VFP11 erratum can trigger with denorm
		     operands on either the FMAC or the DS pipeline.  This might
		     lead to slightly overenthusiastic veneer insertion.  */
		  if (pipe == VFP11_FMAC || pipe == VFP11_DS)
		    {
		      state = use_vector ? 1 : 2;
		      first_fmac = i;
		      veneer_of_insn = insn;
		    }
		  break;

		case 1:
		  {
		    int other_regs[3], other_numregs;
		    pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
						      other_regs,
						      &other_numregs);
		    if (pipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,
							 numregs))
		      state = 3;
		    else
		      state = 2;
		  }
		  break;

		case 2:
		  {
		    int other_regs[3], other_numregs;
		    pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
						      other_regs,
						      &other_numregs);
		    if (pipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,
							 numregs))
		      state = 3;
		    else
		      {
			/* Nothing matched: restart scanning just after
			   the remembered FMAC instruction.  */
			state = 0;
			next_i = first_fmac + 4;
		      }
		  }
		  break;

		case 3:
		  abort ();  /* Should be unreachable.  */
		}

	      if (state == 3)
		{
		  /* An anti-dependent sequence was found: record the
		     erratum site and allocate a veneer for it.  */
		  elf32_vfp11_erratum_list *newerr
		    = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
		  int errcount;

		  errcount = ++(elf32_arm_section_data (sec)->erratumcount);

		  newerr->u.b.vfp_insn = veneer_of_insn;

		  switch (span_type)
		    {
		    case 'a':
		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
		      break;

		    default:
		      abort ();
		    }

		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
					       first_fmac);

		  newerr->vma = -1;

		  newerr->next = sec_data->erratumlist;
		  sec_data->erratumlist = newerr;

		  state = 0;
		}

	      i = next_i;
	    }
	}

      /* Free the contents unless they are the section's cached copy.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
6007
6008 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6009 after sections have been laid out, using specially-named symbols. */
6010
6011 void
6012 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6013 struct bfd_link_info *link_info)
6014 {
6015 asection *sec;
6016 struct elf32_arm_link_hash_table *globals;
6017 char *tmp_name;
6018
6019 if (link_info->relocatable)
6020 return;
6021
6022 /* Skip if this bfd does not correspond to an ELF image. */
6023 if (! is_arm_elf (abfd))
6024 return;
6025
6026 globals = elf32_arm_hash_table (link_info);
6027
6028 tmp_name = bfd_malloc ((bfd_size_type) strlen
6029 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6030
6031 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6032 {
6033 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6034 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6035
6036 for (; errnode != NULL; errnode = errnode->next)
6037 {
6038 struct elf_link_hash_entry *myh;
6039 bfd_vma vma;
6040
6041 switch (errnode->type)
6042 {
6043 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6044 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6045 /* Find veneer symbol. */
6046 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6047 errnode->u.b.veneer->u.v.id);
6048
6049 myh = elf_link_hash_lookup
6050 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6051
6052 if (myh == NULL)
6053 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6054 "`%s'"), abfd, tmp_name);
6055
6056 vma = myh->root.u.def.section->output_section->vma
6057 + myh->root.u.def.section->output_offset
6058 + myh->root.u.def.value;
6059
6060 errnode->u.b.veneer->vma = vma;
6061 break;
6062
6063 case VFP11_ERRATUM_ARM_VENEER:
6064 case VFP11_ERRATUM_THUMB_VENEER:
6065 /* Find return location. */
6066 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6067 errnode->u.v.id);
6068
6069 myh = elf_link_hash_lookup
6070 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6071
6072 if (myh == NULL)
6073 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6074 "`%s'"), abfd, tmp_name);
6075
6076 vma = myh->root.u.def.section->output_section->vma
6077 + myh->root.u.def.section->output_offset
6078 + myh->root.u.def.value;
6079
6080 errnode->u.v.branch->vma = vma;
6081 break;
6082
6083 default:
6084 abort ();
6085 }
6086 }
6087 }
6088
6089 free (tmp_name);
6090 }
6091
6092
6093 /* Set target relocation values needed during linking. */
6094
6095 void
6096 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6097 struct bfd_link_info *link_info,
6098 int target1_is_rel,
6099 char * target2_type,
6100 int fix_v4bx,
6101 int use_blx,
6102 bfd_arm_vfp11_fix vfp11_fix,
6103 int no_enum_warn, int no_wchar_warn,
6104 int pic_veneer, int fix_cortex_a8)
6105 {
6106 struct elf32_arm_link_hash_table *globals;
6107
6108 globals = elf32_arm_hash_table (link_info);
6109
6110 globals->target1_is_rel = target1_is_rel;
6111 if (strcmp (target2_type, "rel") == 0)
6112 globals->target2_reloc = R_ARM_REL32;
6113 else if (strcmp (target2_type, "abs") == 0)
6114 globals->target2_reloc = R_ARM_ABS32;
6115 else if (strcmp (target2_type, "got-rel") == 0)
6116 globals->target2_reloc = R_ARM_GOT_PREL;
6117 else
6118 {
6119 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6120 target2_type);
6121 }
6122 globals->fix_v4bx = fix_v4bx;
6123 globals->use_blx |= use_blx;
6124 globals->vfp11_fix = vfp11_fix;
6125 globals->pic_veneer = pic_veneer;
6126 globals->fix_cortex_a8 = fix_cortex_a8;
6127
6128 BFD_ASSERT (is_arm_elf (output_bfd));
6129 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6130 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6131 }
6132
/* Replace the target offset of a Thumb bl or b.w instruction.
   ABFD supplies the byte order, OFFSET is the new branch offset in
   bytes (must be halfword aligned), and INSN points at the first of
   the two instruction halfwords to rewrite.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;		/* First halfword of the instruction.  */
  bfd_vma lower;		/* Second halfword of the instruction.  */
  int reloc_sign;

  /* Branch targets are halfword aligned, so bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* Upper halfword: sign bit in bit 10, offset bits 12-21 in
     bits 0-9.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Lower halfword: bits 13 and 11 receive offset bits 23 and 22
     inverted and XORed with the sign (the Thumb-2 J1/J2 encoding),
     and bits 0-10 hold offset bits 1-11.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
6157
/* Thumb code calling an ARM function.  Populate (on first use) the
   Thumb-to-ARM glue stub for NAME and retarget the Thumb BL at
   HIT_DATA to branch to that stub.  Returns TRUE on success, FALSE
   if the glue symbol is missing or interworking was not enabled in
   the defining bfd.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* The glue symbol for NAME must have been created earlier.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);

  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set in the glue symbol's value means the stub contents
     have not been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: thumb call to arm"),
	     sym_sec->owner, input_bfd, name);

	  return FALSE;
	}

      /* Clear the marker bit to obtain the real stub offset, and
	 record that the stub is now populated.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Two Thumb instructions (see t2a1/t2a2 definitions) that
	 switch the processor into ARM state.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      /* ARM branch from the stub to the real destination.  */
      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
6259
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL
   if no glue symbol exists for NAME.  VAL is the absolute address of
   the Thumb destination; S is the glue section being filled in.
   Three stub flavours are emitted depending on whether the link is
   position independent and whether BLX is available.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* The glue symbol for NAME must have been created earlier.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);

  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set in the glue symbol's value means the stub contents
     have not been written out yet.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn, but carry on and emit the stub anyway (unlike the
	     Thumb-to-ARM direction, which fails outright).  */
	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);
	}

      /* Clear the marker bit to obtain the real stub offset, and
	 record that the stub is now populated.  */
      --my_offset;
      myh->root.u.def.value = my_offset;

      if (info->shared || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* v5T and later: one load instruction plus an inline
	     literal holding the destination.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* v4T: load the destination into r12 and BX to it.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
6354
6355 /* Arm code calling a Thumb function. */
6356
6357 static int
6358 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6359 const char * name,
6360 bfd * input_bfd,
6361 bfd * output_bfd,
6362 asection * input_section,
6363 bfd_byte * hit_data,
6364 asection * sym_sec,
6365 bfd_vma offset,
6366 bfd_signed_vma addend,
6367 bfd_vma val,
6368 char **error_message)
6369 {
6370 unsigned long int tmp;
6371 bfd_vma my_offset;
6372 asection * s;
6373 long int ret_offset;
6374 struct elf_link_hash_entry * myh;
6375 struct elf32_arm_link_hash_table * globals;
6376
6377 globals = elf32_arm_hash_table (info);
6378
6379 BFD_ASSERT (globals != NULL);
6380 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6381
6382 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6383 ARM2THUMB_GLUE_SECTION_NAME);
6384 BFD_ASSERT (s != NULL);
6385 BFD_ASSERT (s->contents != NULL);
6386 BFD_ASSERT (s->output_section != NULL);
6387
6388 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6389 sym_sec, val, s, error_message);
6390 if (!myh)
6391 return FALSE;
6392
6393 my_offset = myh->root.u.def.value;
6394 tmp = bfd_get_32 (input_bfd, hit_data);
6395 tmp = tmp & 0xFF000000;
6396
6397 /* Somehow these are both 4 too far, so subtract 8. */
6398 ret_offset = (s->output_offset
6399 + my_offset
6400 + s->output_section->vma
6401 - (input_section->output_offset
6402 + input_section->output_section->vma
6403 + offset + addend)
6404 - 8);
6405
6406 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6407
6408 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6409
6410 return TRUE;
6411 }
6412
/* Populate Arm stub for an exported Thumb function.  Called via
   elf_link_hash_traverse; INF is really the struct bfd_link_info.
   Always returns TRUE so the traversal continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);

  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the exported Thumb function.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
6457
6458 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
6459
6460 static bfd_vma
6461 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6462 {
6463 bfd_byte *p;
6464 bfd_vma glue_addr;
6465 asection *s;
6466 struct elf32_arm_link_hash_table *globals;
6467
6468 globals = elf32_arm_hash_table (info);
6469
6470 BFD_ASSERT (globals != NULL);
6471 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6472
6473 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6474 ARM_BX_GLUE_SECTION_NAME);
6475 BFD_ASSERT (s != NULL);
6476 BFD_ASSERT (s->contents != NULL);
6477 BFD_ASSERT (s->output_section != NULL);
6478
6479 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6480
6481 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6482
6483 if ((globals->bx_glue_offset[reg] & 1) == 0)
6484 {
6485 p = s->contents + glue_addr;
6486 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6487 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6488 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6489 globals->bx_glue_offset[reg] |= 1;
6490 }
6491
6492 return glue_addr + s->output_section->vma + s->output_offset;
6493 }
6494
6495 /* Generate Arm stubs for exported Thumb symbols. */
6496 static void
6497 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6498 struct bfd_link_info *link_info)
6499 {
6500 struct elf32_arm_link_hash_table * globals;
6501
6502 if (link_info == NULL)
6503 /* Ignore this if we are not called by the ELF backend linker. */
6504 return;
6505
6506 globals = elf32_arm_hash_table (link_info);
6507 /* If blx is available then exported Thumb symbols are OK and there is
6508 nothing to do. */
6509 if (globals->use_blx)
6510 return;
6511
6512 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6513 link_info);
6514 }
6515
6516 /* Some relocations map to different relocations depending on the
6517 target. Return the real relocation. */
6518
6519 static int
6520 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6521 int r_type)
6522 {
6523 switch (r_type)
6524 {
6525 case R_ARM_TARGET1:
6526 if (globals->target1_is_rel)
6527 return R_ARM_REL32;
6528 else
6529 return R_ARM_ABS32;
6530
6531 case R_ARM_TARGET2:
6532 return globals->target2_reloc;
6533
6534 default:
6535 return r_type;
6536 }
6537 }
6538
6539 /* Return the base VMA address which should be subtracted from real addresses
6540 when resolving @dtpoff relocation.
6541 This is PT_TLS segment p_vaddr. */
6542
6543 static bfd_vma
6544 dtpoff_base (struct bfd_link_info *info)
6545 {
6546 /* If tls_sec is NULL, we should have signalled an error already. */
6547 if (elf_hash_table (info)->tls_sec == NULL)
6548 return 0;
6549 return elf_hash_table (info)->tls_sec->vma;
6550 }
6551
6552 /* Return the relocation value for @tpoff relocation
6553 if STT_TLS virtual address is ADDRESS. */
6554
6555 static bfd_vma
6556 tpoff (struct bfd_link_info *info, bfd_vma address)
6557 {
6558 struct elf_link_hash_table *htab = elf_hash_table (info);
6559 bfd_vma base;
6560
6561 /* If tls_sec is NULL, we should have signalled an error already. */
6562 if (htab->tls_sec == NULL)
6563 return 0;
6564 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6565 return address - htab->tls_sec->vma + base;
6566 }
6567
6568 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6569 VALUE is the relocation value. */
6570
6571 static bfd_reloc_status_type
6572 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6573 {
6574 if (value > 0xfff)
6575 return bfd_reloc_overflow;
6576
6577 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6578 bfd_put_32 (abfd, value, data);
6579 return bfd_reloc_ok;
6580 }
6581
6582 /* For a given value of n, calculate the value of G_n as required to
6583 deal with group relocations. We return it in the form of an
6584 encoded constant-and-rotation, together with the final residual. If n is
6585 specified as less than zero, then final_residual is filled with the
6586 input value and no further action is performed. */
6587
6588 static bfd_vma
6589 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6590 {
6591 int current_n;
6592 bfd_vma g_n;
6593 bfd_vma encoded_g_n = 0;
6594 bfd_vma residual = value; /* Also known as Y_n. */
6595
6596 for (current_n = 0; current_n <= n; current_n++)
6597 {
6598 int shift;
6599
6600 /* Calculate which part of the value to mask. */
6601 if (residual == 0)
6602 shift = 0;
6603 else
6604 {
6605 int msb;
6606
6607 /* Determine the most significant bit in the residual and
6608 align the resulting value to a 2-bit boundary. */
6609 for (msb = 30; msb >= 0; msb -= 2)
6610 if (residual & (3 << msb))
6611 break;
6612
6613 /* The desired shift is now (msb - 6), or zero, whichever
6614 is the greater. */
6615 shift = msb - 6;
6616 if (shift < 0)
6617 shift = 0;
6618 }
6619
6620 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6621 g_n = residual & (0xff << shift);
6622 encoded_g_n = (g_n >> shift)
6623 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6624
6625 /* Calculate the residual for the next time around. */
6626 residual &= ~g_n;
6627 }
6628
6629 *final_residual = residual;
6630
6631 return encoded_g_n;
6632 }
6633
6634 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6635 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6636
6637 static int
6638 identify_add_or_sub (bfd_vma insn)
6639 {
6640 int opcode = insn & 0x1e00000;
6641
6642 if (opcode == 1 << 23) /* ADD */
6643 return 1;
6644
6645 if (opcode == 1 << 22) /* SUB */
6646 return -1;
6647
6648 return 0;
6649 }
6650
6651 /* Perform a relocation as part of a final link. */
6652
6653 static bfd_reloc_status_type
6654 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6655 bfd * input_bfd,
6656 bfd * output_bfd,
6657 asection * input_section,
6658 bfd_byte * contents,
6659 Elf_Internal_Rela * rel,
6660 bfd_vma value,
6661 struct bfd_link_info * info,
6662 asection * sym_sec,
6663 const char * sym_name,
6664 int sym_flags,
6665 struct elf_link_hash_entry * h,
6666 bfd_boolean * unresolved_reloc_p,
6667 char ** error_message)
6668 {
6669 unsigned long r_type = howto->type;
6670 unsigned long r_symndx;
6671 bfd_byte * hit_data = contents + rel->r_offset;
6672 bfd * dynobj = NULL;
6673 Elf_Internal_Shdr * symtab_hdr;
6674 struct elf_link_hash_entry ** sym_hashes;
6675 bfd_vma * local_got_offsets;
6676 asection * sgot = NULL;
6677 asection * splt = NULL;
6678 asection * sreloc = NULL;
6679 bfd_vma addend;
6680 bfd_signed_vma signed_addend;
6681 struct elf32_arm_link_hash_table * globals;
6682
6683 globals = elf32_arm_hash_table (info);
6684
6685 BFD_ASSERT (is_arm_elf (input_bfd));
6686
6687 /* Some relocation types map to different relocations depending on the
6688 target. We pick the right one here. */
6689 r_type = arm_real_reloc_type (globals, r_type);
6690 if (r_type != howto->type)
6691 howto = elf32_arm_howto_from_type (r_type);
6692
6693 /* If the start address has been set, then set the EF_ARM_HASENTRY
6694 flag. Setting this more than once is redundant, but the cost is
6695 not too high, and it keeps the code simple.
6696
6697 The test is done here, rather than somewhere else, because the
6698 start address is only set just before the final link commences.
6699
6700 Note - if the user deliberately sets a start address of 0, the
6701 flag will not be set. */
6702 if (bfd_get_start_address (output_bfd) != 0)
6703 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6704
6705 dynobj = elf_hash_table (info)->dynobj;
6706 if (dynobj)
6707 {
6708 sgot = bfd_get_section_by_name (dynobj, ".got");
6709 splt = bfd_get_section_by_name (dynobj, ".plt");
6710 }
6711 symtab_hdr = & elf_symtab_hdr (input_bfd);
6712 sym_hashes = elf_sym_hashes (input_bfd);
6713 local_got_offsets = elf_local_got_offsets (input_bfd);
6714 r_symndx = ELF32_R_SYM (rel->r_info);
6715
6716 if (globals->use_rel)
6717 {
6718 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6719
6720 if (addend & ((howto->src_mask + 1) >> 1))
6721 {
6722 signed_addend = -1;
6723 signed_addend &= ~ howto->src_mask;
6724 signed_addend |= addend;
6725 }
6726 else
6727 signed_addend = addend;
6728 }
6729 else
6730 addend = signed_addend = rel->r_addend;
6731
6732 switch (r_type)
6733 {
6734 case R_ARM_NONE:
6735 /* We don't need to find a value for this symbol. It's just a
6736 marker. */
6737 *unresolved_reloc_p = FALSE;
6738 return bfd_reloc_ok;
6739
6740 case R_ARM_ABS12:
6741 if (!globals->vxworks_p)
6742 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6743
6744 case R_ARM_PC24:
6745 case R_ARM_ABS32:
6746 case R_ARM_ABS32_NOI:
6747 case R_ARM_REL32:
6748 case R_ARM_REL32_NOI:
6749 case R_ARM_CALL:
6750 case R_ARM_JUMP24:
6751 case R_ARM_XPC25:
6752 case R_ARM_PREL31:
6753 case R_ARM_PLT32:
6754 /* Handle relocations which should use the PLT entry. ABS32/REL32
6755 will use the symbol's value, which may point to a PLT entry, but we
6756 don't need to handle that here. If we created a PLT entry, all
6757 branches in this object should go to it, except if the PLT is too
6758 far away, in which case a long branch stub should be inserted. */
6759 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6760 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6761 && r_type != R_ARM_CALL
6762 && r_type != R_ARM_JUMP24
6763 && r_type != R_ARM_PLT32)
6764 && h != NULL
6765 && splt != NULL
6766 && h->plt.offset != (bfd_vma) -1)
6767 {
6768 /* If we've created a .plt section, and assigned a PLT entry to
6769 this function, it should not be known to bind locally. If
6770 it were, we would have cleared the PLT entry. */
6771 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6772
6773 value = (splt->output_section->vma
6774 + splt->output_offset
6775 + h->plt.offset);
6776 *unresolved_reloc_p = FALSE;
6777 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6778 contents, rel->r_offset, value,
6779 rel->r_addend);
6780 }
6781
6782 /* When generating a shared object or relocatable executable, these
6783 relocations are copied into the output file to be resolved at
6784 run time. */
6785 if ((info->shared || globals->root.is_relocatable_executable)
6786 && (input_section->flags & SEC_ALLOC)
6787 && !(elf32_arm_hash_table (info)->vxworks_p
6788 && strcmp (input_section->output_section->name,
6789 ".tls_vars") == 0)
6790 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6791 || !SYMBOL_CALLS_LOCAL (info, h))
6792 && (h == NULL
6793 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6794 || h->root.type != bfd_link_hash_undefweak)
6795 && r_type != R_ARM_PC24
6796 && r_type != R_ARM_CALL
6797 && r_type != R_ARM_JUMP24
6798 && r_type != R_ARM_PREL31
6799 && r_type != R_ARM_PLT32)
6800 {
6801 Elf_Internal_Rela outrel;
6802 bfd_byte *loc;
6803 bfd_boolean skip, relocate;
6804
6805 *unresolved_reloc_p = FALSE;
6806
6807 if (sreloc == NULL)
6808 {
6809 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6810 ! globals->use_rel);
6811
6812 if (sreloc == NULL)
6813 return bfd_reloc_notsupported;
6814 }
6815
6816 skip = FALSE;
6817 relocate = FALSE;
6818
6819 outrel.r_addend = addend;
6820 outrel.r_offset =
6821 _bfd_elf_section_offset (output_bfd, info, input_section,
6822 rel->r_offset);
6823 if (outrel.r_offset == (bfd_vma) -1)
6824 skip = TRUE;
6825 else if (outrel.r_offset == (bfd_vma) -2)
6826 skip = TRUE, relocate = TRUE;
6827 outrel.r_offset += (input_section->output_section->vma
6828 + input_section->output_offset);
6829
6830 if (skip)
6831 memset (&outrel, 0, sizeof outrel);
6832 else if (h != NULL
6833 && h->dynindx != -1
6834 && (!info->shared
6835 || !info->symbolic
6836 || !h->def_regular))
6837 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6838 else
6839 {
6840 int symbol;
6841
6842 /* This symbol is local, or marked to become local. */
6843 if (sym_flags == STT_ARM_TFUNC)
6844 value |= 1;
6845 if (globals->symbian_p)
6846 {
6847 asection *osec;
6848
6849 /* On Symbian OS, the data segment and text segement
6850 can be relocated independently. Therefore, we
6851 must indicate the segment to which this
6852 relocation is relative. The BPABI allows us to
6853 use any symbol in the right segment; we just use
6854 the section symbol as it is convenient. (We
6855 cannot use the symbol given by "h" directly as it
6856 will not appear in the dynamic symbol table.)
6857
6858 Note that the dynamic linker ignores the section
6859 symbol value, so we don't subtract osec->vma
6860 from the emitted reloc addend. */
6861 if (sym_sec)
6862 osec = sym_sec->output_section;
6863 else
6864 osec = input_section->output_section;
6865 symbol = elf_section_data (osec)->dynindx;
6866 if (symbol == 0)
6867 {
6868 struct elf_link_hash_table *htab = elf_hash_table (info);
6869
6870 if ((osec->flags & SEC_READONLY) == 0
6871 && htab->data_index_section != NULL)
6872 osec = htab->data_index_section;
6873 else
6874 osec = htab->text_index_section;
6875 symbol = elf_section_data (osec)->dynindx;
6876 }
6877 BFD_ASSERT (symbol != 0);
6878 }
6879 else
6880 /* On SVR4-ish systems, the dynamic loader cannot
6881 relocate the text and data segments independently,
6882 so the symbol does not matter. */
6883 symbol = 0;
6884 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6885 if (globals->use_rel)
6886 relocate = TRUE;
6887 else
6888 outrel.r_addend += value;
6889 }
6890
6891 loc = sreloc->contents;
6892 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6893 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6894
6895 /* If this reloc is against an external symbol, we do not want to
6896 fiddle with the addend. Otherwise, we need to include the symbol
6897 value so that it becomes an addend for the dynamic reloc. */
6898 if (! relocate)
6899 return bfd_reloc_ok;
6900
6901 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6902 contents, rel->r_offset, value,
6903 (bfd_vma) 0);
6904 }
6905 else switch (r_type)
6906 {
6907 case R_ARM_ABS12:
6908 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6909
6910 case R_ARM_XPC25: /* Arm BLX instruction. */
6911 case R_ARM_CALL:
6912 case R_ARM_JUMP24:
6913 case R_ARM_PC24: /* Arm B/BL instruction. */
6914 case R_ARM_PLT32:
6915 {
6916 bfd_vma from;
6917 bfd_signed_vma branch_offset;
6918 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6919
6920 if (r_type == R_ARM_XPC25)
6921 {
6922 /* Check for Arm calling Arm function. */
6923 /* FIXME: Should we translate the instruction into a BL
6924 instruction instead ? */
6925 if (sym_flags != STT_ARM_TFUNC)
6926 (*_bfd_error_handler)
6927 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6928 input_bfd,
6929 h ? h->root.root.string : "(local)");
6930 }
6931 else if (r_type == R_ARM_PC24)
6932 {
6933 /* Check for Arm calling Thumb function. */
6934 if (sym_flags == STT_ARM_TFUNC)
6935 {
6936 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6937 output_bfd, input_section,
6938 hit_data, sym_sec, rel->r_offset,
6939 signed_addend, value,
6940 error_message))
6941 return bfd_reloc_ok;
6942 else
6943 return bfd_reloc_dangerous;
6944 }
6945 }
6946
6947 /* Check if a stub has to be inserted because the
6948 destination is too far or we are changing mode. */
6949 if ( r_type == R_ARM_CALL
6950 || r_type == R_ARM_JUMP24
6951 || r_type == R_ARM_PLT32)
6952 {
6953 /* If the call goes through a PLT entry, make sure to
6954 check distance to the right destination address. */
6955 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6956 {
6957 value = (splt->output_section->vma
6958 + splt->output_offset
6959 + h->plt.offset);
6960 *unresolved_reloc_p = FALSE;
6961 }
6962
6963 from = (input_section->output_section->vma
6964 + input_section->output_offset
6965 + rel->r_offset);
6966 branch_offset = (bfd_signed_vma)(value - from);
6967
6968 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6969 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6970 || ((sym_flags == STT_ARM_TFUNC)
6971 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6972 || (r_type == R_ARM_JUMP24)
6973 || (r_type == R_ARM_PLT32) ))
6974 )
6975 {
6976 /* The target is out of reach, so redirect the
6977 branch to the local stub for this function. */
6978
6979 stub_entry = elf32_arm_get_stub_entry (input_section,
6980 sym_sec, h,
6981 rel, globals);
6982 if (stub_entry != NULL)
6983 value = (stub_entry->stub_offset
6984 + stub_entry->stub_sec->output_offset
6985 + stub_entry->stub_sec->output_section->vma);
6986 }
6987 }
6988
6989 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6990 where:
6991 S is the address of the symbol in the relocation.
6992 P is address of the instruction being relocated.
6993 A is the addend (extracted from the instruction) in bytes.
6994
6995 S is held in 'value'.
6996 P is the base address of the section containing the
6997 instruction plus the offset of the reloc into that
6998 section, ie:
6999 (input_section->output_section->vma +
7000 input_section->output_offset +
7001 rel->r_offset).
7002 A is the addend, converted into bytes, ie:
7003 (signed_addend * 4)
7004
7005 Note: None of these operations have knowledge of the pipeline
7006 size of the processor, thus it is up to the assembler to
7007 encode this information into the addend. */
7008 value -= (input_section->output_section->vma
7009 + input_section->output_offset);
7010 value -= rel->r_offset;
7011 if (globals->use_rel)
7012 value += (signed_addend << howto->size);
7013 else
7014 /* RELA addends do not have to be adjusted by howto->size. */
7015 value += signed_addend;
7016
7017 signed_addend = value;
7018 signed_addend >>= howto->rightshift;
7019
7020 /* A branch to an undefined weak symbol is turned into a jump to
7021 the next instruction unless a PLT entry will be created. */
7022 if (h && h->root.type == bfd_link_hash_undefweak
7023 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7024 {
7025 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7026 | 0x0affffff;
7027 }
7028 else
7029 {
7030 /* Perform a signed range check. */
7031 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7032 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7033 return bfd_reloc_overflow;
7034
7035 addend = (value & 2);
7036
7037 value = (signed_addend & howto->dst_mask)
7038 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7039
7040 if (r_type == R_ARM_CALL)
7041 {
7042 /* Set the H bit in the BLX instruction. */
7043 if (sym_flags == STT_ARM_TFUNC)
7044 {
7045 if (addend)
7046 value |= (1 << 24);
7047 else
7048 value &= ~(bfd_vma)(1 << 24);
7049 }
7050
7051 /* Select the correct instruction (BL or BLX). */
7052 /* Only if we are not handling a BL to a stub. In this
7053 case, mode switching is performed by the stub. */
7054 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7055 value |= (1 << 28);
7056 else
7057 {
7058 value &= ~(bfd_vma)(1 << 28);
7059 value |= (1 << 24);
7060 }
7061 }
7062 }
7063 }
7064 break;
7065
7066 case R_ARM_ABS32:
7067 value += addend;
7068 if (sym_flags == STT_ARM_TFUNC)
7069 value |= 1;
7070 break;
7071
7072 case R_ARM_ABS32_NOI:
7073 value += addend;
7074 break;
7075
7076 case R_ARM_REL32:
7077 value += addend;
7078 if (sym_flags == STT_ARM_TFUNC)
7079 value |= 1;
7080 value -= (input_section->output_section->vma
7081 + input_section->output_offset + rel->r_offset);
7082 break;
7083
7084 case R_ARM_REL32_NOI:
7085 value += addend;
7086 value -= (input_section->output_section->vma
7087 + input_section->output_offset + rel->r_offset);
7088 break;
7089
7090 case R_ARM_PREL31:
7091 value -= (input_section->output_section->vma
7092 + input_section->output_offset + rel->r_offset);
7093 value += signed_addend;
7094 if (! h || h->root.type != bfd_link_hash_undefweak)
7095 {
7096 /* Check for overflow. */
7097 if ((value ^ (value >> 1)) & (1 << 30))
7098 return bfd_reloc_overflow;
7099 }
7100 value &= 0x7fffffff;
7101 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7102 if (sym_flags == STT_ARM_TFUNC)
7103 value |= 1;
7104 break;
7105 }
7106
7107 bfd_put_32 (input_bfd, value, hit_data);
7108 return bfd_reloc_ok;
7109
7110 case R_ARM_ABS8:
7111 value += addend;
7112 if ((long) value > 0x7f || (long) value < -0x80)
7113 return bfd_reloc_overflow;
7114
7115 bfd_put_8 (input_bfd, value, hit_data);
7116 return bfd_reloc_ok;
7117
7118 case R_ARM_ABS16:
7119 value += addend;
7120
7121 if ((long) value > 0x7fff || (long) value < -0x8000)
7122 return bfd_reloc_overflow;
7123
7124 bfd_put_16 (input_bfd, value, hit_data);
7125 return bfd_reloc_ok;
7126
7127 case R_ARM_THM_ABS5:
7128 /* Support ldr and str instructions for the thumb. */
7129 if (globals->use_rel)
7130 {
7131 /* Need to refetch addend. */
7132 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7133 /* ??? Need to determine shift amount from operand size. */
7134 addend >>= howto->rightshift;
7135 }
7136 value += addend;
7137
7138 /* ??? Isn't value unsigned? */
7139 if ((long) value > 0x1f || (long) value < -0x10)
7140 return bfd_reloc_overflow;
7141
7142 /* ??? Value needs to be properly shifted into place first. */
7143 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7144 bfd_put_16 (input_bfd, value, hit_data);
7145 return bfd_reloc_ok;
7146
7147 case R_ARM_THM_ALU_PREL_11_0:
7148 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7149 {
7150 bfd_vma insn;
7151 bfd_signed_vma relocation;
7152
7153 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7154 | bfd_get_16 (input_bfd, hit_data + 2);
7155
7156 if (globals->use_rel)
7157 {
7158 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7159 | ((insn & (1 << 26)) >> 15);
7160 if (insn & 0xf00000)
7161 signed_addend = -signed_addend;
7162 }
7163
7164 relocation = value + signed_addend;
7165 relocation -= (input_section->output_section->vma
7166 + input_section->output_offset
7167 + rel->r_offset);
7168
7169 value = abs (relocation);
7170
7171 if (value >= 0x1000)
7172 return bfd_reloc_overflow;
7173
7174 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7175 | ((value & 0x700) << 4)
7176 | ((value & 0x800) << 15);
7177 if (relocation < 0)
7178 insn |= 0xa00000;
7179
7180 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7181 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7182
7183 return bfd_reloc_ok;
7184 }
7185
7186 case R_ARM_THM_PC8:
7187 /* PR 10073: This reloc is not generated by the GNU toolchain,
7188 but it is supported for compatibility with third party libraries
7189 generated by other compilers, specifically the ARM/IAR. */
7190 {
7191 bfd_vma insn;
7192 bfd_signed_vma relocation;
7193
7194 insn = bfd_get_16 (input_bfd, hit_data);
7195
7196 if (globals->use_rel)
7197 addend = (insn & 0x00ff) << 2;
7198
7199 relocation = value + addend;
7200 relocation -= (input_section->output_section->vma
7201 + input_section->output_offset
7202 + rel->r_offset);
7203
7204 value = abs (relocation);
7205
7206 /* We do not check for overflow of this reloc. Although strictly
7207 speaking this is incorrect, it appears to be necessary in order
7208 to work with IAR generated relocs. Since GCC and GAS do not
7209 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7210 a problem for them. */
7211 value &= 0x3fc;
7212
7213 insn = (insn & 0xff00) | (value >> 2);
7214
7215 bfd_put_16 (input_bfd, insn, hit_data);
7216
7217 return bfd_reloc_ok;
7218 }
7219
7220 case R_ARM_THM_PC12:
7221 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7222 {
7223 bfd_vma insn;
7224 bfd_signed_vma relocation;
7225
7226 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7227 | bfd_get_16 (input_bfd, hit_data + 2);
7228
7229 if (globals->use_rel)
7230 {
7231 signed_addend = insn & 0xfff;
7232 if (!(insn & (1 << 23)))
7233 signed_addend = -signed_addend;
7234 }
7235
7236 relocation = value + signed_addend;
7237 relocation -= (input_section->output_section->vma
7238 + input_section->output_offset
7239 + rel->r_offset);
7240
7241 value = abs (relocation);
7242
7243 if (value >= 0x1000)
7244 return bfd_reloc_overflow;
7245
7246 insn = (insn & 0xff7ff000) | value;
7247 if (relocation >= 0)
7248 insn |= (1 << 23);
7249
7250 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7251 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7252
7253 return bfd_reloc_ok;
7254 }
7255
7256 case R_ARM_THM_XPC22:
7257 case R_ARM_THM_CALL:
7258 case R_ARM_THM_JUMP24:
7259 /* Thumb BL (branch long instruction). */
7260 {
7261 bfd_vma relocation;
7262 bfd_vma reloc_sign;
7263 bfd_boolean overflow = FALSE;
7264 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7265 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7266 bfd_signed_vma reloc_signed_max;
7267 bfd_signed_vma reloc_signed_min;
7268 bfd_vma check;
7269 bfd_signed_vma signed_check;
7270 int bitsize;
7271 int thumb2 = using_thumb2 (globals);
7272
7273 /* A branch to an undefined weak symbol is turned into a jump to
7274 the next instruction unless a PLT entry will be created. */
7275 if (h && h->root.type == bfd_link_hash_undefweak
7276 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7277 {
7278 bfd_put_16 (input_bfd, 0xe000, hit_data);
7279 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7280 return bfd_reloc_ok;
7281 }
7282
7283 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7284 with Thumb-1) involving the J1 and J2 bits. */
7285 if (globals->use_rel)
7286 {
7287 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7288 bfd_vma upper = upper_insn & 0x3ff;
7289 bfd_vma lower = lower_insn & 0x7ff;
7290 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7291 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7292 bfd_vma i1 = j1 ^ s ? 0 : 1;
7293 bfd_vma i2 = j2 ^ s ? 0 : 1;
7294
7295 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7296 /* Sign extend. */
7297 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7298
7299 signed_addend = addend;
7300 }
7301
7302 if (r_type == R_ARM_THM_XPC22)
7303 {
7304 /* Check for Thumb to Thumb call. */
7305 /* FIXME: Should we translate the instruction into a BL
7306 instruction instead ? */
7307 if (sym_flags == STT_ARM_TFUNC)
7308 (*_bfd_error_handler)
7309 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7310 input_bfd,
7311 h ? h->root.root.string : "(local)");
7312 }
7313 else
7314 {
7315 /* If it is not a call to Thumb, assume call to Arm.
7316 If it is a call relative to a section name, then it is not a
7317 function call at all, but rather a long jump. Calls through
7318 the PLT do not require stubs. */
7319 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7320 && (h == NULL || splt == NULL
7321 || h->plt.offset == (bfd_vma) -1))
7322 {
7323 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7324 {
7325 /* Convert BL to BLX. */
7326 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7327 }
7328 else if (( r_type != R_ARM_THM_CALL)
7329 && (r_type != R_ARM_THM_JUMP24))
7330 {
7331 if (elf32_thumb_to_arm_stub
7332 (info, sym_name, input_bfd, output_bfd, input_section,
7333 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7334 error_message))
7335 return bfd_reloc_ok;
7336 else
7337 return bfd_reloc_dangerous;
7338 }
7339 }
7340 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7341 && r_type == R_ARM_THM_CALL)
7342 {
7343 /* Make sure this is a BL. */
7344 lower_insn |= 0x1800;
7345 }
7346 }
7347
7348 /* Handle calls via the PLT. */
7349 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7350 {
7351 value = (splt->output_section->vma
7352 + splt->output_offset
7353 + h->plt.offset);
7354 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7355 {
7356 /* If the Thumb BLX instruction is available, convert the
7357 BL to a BLX instruction to call the ARM-mode PLT entry. */
7358 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7359 }
7360 else
7361 /* Target the Thumb stub before the ARM PLT entry. */
7362 value -= PLT_THUMB_STUB_SIZE;
7363 *unresolved_reloc_p = FALSE;
7364 }
7365
7366 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7367 {
7368 /* Check if a stub has to be inserted because the destination
7369 is too far. */
7370 bfd_vma from;
7371 bfd_signed_vma branch_offset;
7372 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7373
7374 from = (input_section->output_section->vma
7375 + input_section->output_offset
7376 + rel->r_offset);
7377 branch_offset = (bfd_signed_vma)(value - from);
7378
7379 if ((!thumb2
7380 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7381 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7382 ||
7383 (thumb2
7384 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7385 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7386 || ((sym_flags != STT_ARM_TFUNC)
7387 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7388 || r_type == R_ARM_THM_JUMP24)))
7389 {
7390 /* The target is out of reach or we are changing modes, so
7391 redirect the branch to the local stub for this
7392 function. */
7393 stub_entry = elf32_arm_get_stub_entry (input_section,
7394 sym_sec, h,
7395 rel, globals);
7396 if (stub_entry != NULL)
7397 value = (stub_entry->stub_offset
7398 + stub_entry->stub_sec->output_offset
7399 + stub_entry->stub_sec->output_section->vma);
7400
7401 /* If this call becomes a call to Arm, force BLX. */
7402 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7403 {
7404 if ((stub_entry
7405 && !arm_stub_is_thumb (stub_entry->stub_type))
7406 || (sym_flags != STT_ARM_TFUNC))
7407 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7408 }
7409 }
7410 }
7411
7412 relocation = value + signed_addend;
7413
7414 relocation -= (input_section->output_section->vma
7415 + input_section->output_offset
7416 + rel->r_offset);
7417
7418 check = relocation >> howto->rightshift;
7419
7420 /* If this is a signed value, the rightshift just dropped
7421 leading 1 bits (assuming twos complement). */
7422 if ((bfd_signed_vma) relocation >= 0)
7423 signed_check = check;
7424 else
7425 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7426
7427 /* Calculate the permissable maximum and minimum values for
7428 this relocation according to whether we're relocating for
7429 Thumb-2 or not. */
7430 bitsize = howto->bitsize;
7431 if (!thumb2)
7432 bitsize -= 2;
7433 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7434 reloc_signed_min = ~reloc_signed_max;
7435
7436 /* Assumes two's complement. */
7437 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7438 overflow = TRUE;
7439
7440 if ((lower_insn & 0x5000) == 0x4000)
7441 /* For a BLX instruction, make sure that the relocation is rounded up
7442 to a word boundary. This follows the semantics of the instruction
7443 which specifies that bit 1 of the target address will come from bit
7444 1 of the base address. */
7445 relocation = (relocation + 2) & ~ 3;
7446
7447 /* Put RELOCATION back into the insn. Assumes two's complement.
7448 We use the Thumb-2 encoding, which is safe even if dealing with
7449 a Thumb-1 instruction by virtue of our overflow check above. */
7450 reloc_sign = (signed_check < 0) ? 1 : 0;
7451 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7452 | ((relocation >> 12) & 0x3ff)
7453 | (reloc_sign << 10);
7454 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7455 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7456 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7457 | ((relocation >> 1) & 0x7ff);
7458
7459 /* Put the relocated value back in the object file: */
7460 bfd_put_16 (input_bfd, upper_insn, hit_data);
7461 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7462
7463 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7464 }
7465 break;
7466
7467 case R_ARM_THM_JUMP19:
7468 /* Thumb32 conditional branch instruction. */
7469 {
7470 bfd_vma relocation;
7471 bfd_boolean overflow = FALSE;
7472 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7473 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7474 bfd_signed_vma reloc_signed_max = 0xffffe;
7475 bfd_signed_vma reloc_signed_min = -0x100000;
7476 bfd_signed_vma signed_check;
7477
7478 /* Need to refetch the addend, reconstruct the top three bits,
7479 and squish the two 11 bit pieces together. */
7480 if (globals->use_rel)
7481 {
7482 bfd_vma S = (upper_insn & 0x0400) >> 10;
7483 bfd_vma upper = (upper_insn & 0x003f);
7484 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7485 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7486 bfd_vma lower = (lower_insn & 0x07ff);
7487
7488 upper |= J1 << 6;
7489 upper |= J2 << 7;
7490 upper |= (!S) << 8;
7491 upper -= 0x0100; /* Sign extend. */
7492
7493 addend = (upper << 12) | (lower << 1);
7494 signed_addend = addend;
7495 }
7496
7497 /* Handle calls via the PLT. */
7498 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7499 {
7500 value = (splt->output_section->vma
7501 + splt->output_offset
7502 + h->plt.offset);
7503 /* Target the Thumb stub before the ARM PLT entry. */
7504 value -= PLT_THUMB_STUB_SIZE;
7505 *unresolved_reloc_p = FALSE;
7506 }
7507
7508 /* ??? Should handle interworking? GCC might someday try to
7509 use this for tail calls. */
7510
7511 relocation = value + signed_addend;
7512 relocation -= (input_section->output_section->vma
7513 + input_section->output_offset
7514 + rel->r_offset);
7515 signed_check = (bfd_signed_vma) relocation;
7516
7517 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7518 overflow = TRUE;
7519
7520 /* Put RELOCATION back into the insn. */
7521 {
7522 bfd_vma S = (relocation & 0x00100000) >> 20;
7523 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7524 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7525 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7526 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7527
7528 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7529 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7530 }
7531
7532 /* Put the relocated value back in the object file: */
7533 bfd_put_16 (input_bfd, upper_insn, hit_data);
7534 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7535
7536 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7537 }
7538
7539 case R_ARM_THM_JUMP11:
7540 case R_ARM_THM_JUMP8:
7541 case R_ARM_THM_JUMP6:
7542 /* Thumb B (branch) instruction). */
7543 {
7544 bfd_signed_vma relocation;
7545 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7546 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7547 bfd_signed_vma signed_check;
7548
7549 /* CZB cannot jump backward. */
7550 if (r_type == R_ARM_THM_JUMP6)
7551 reloc_signed_min = 0;
7552
7553 if (globals->use_rel)
7554 {
7555 /* Need to refetch addend. */
7556 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7557 if (addend & ((howto->src_mask + 1) >> 1))
7558 {
7559 signed_addend = -1;
7560 signed_addend &= ~ howto->src_mask;
7561 signed_addend |= addend;
7562 }
7563 else
7564 signed_addend = addend;
7565 /* The value in the insn has been right shifted. We need to
7566 undo this, so that we can perform the address calculation
7567 in terms of bytes. */
7568 signed_addend <<= howto->rightshift;
7569 }
7570 relocation = value + signed_addend;
7571
7572 relocation -= (input_section->output_section->vma
7573 + input_section->output_offset
7574 + rel->r_offset);
7575
7576 relocation >>= howto->rightshift;
7577 signed_check = relocation;
7578
7579 if (r_type == R_ARM_THM_JUMP6)
7580 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7581 else
7582 relocation &= howto->dst_mask;
7583 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7584
7585 bfd_put_16 (input_bfd, relocation, hit_data);
7586
7587 /* Assumes two's complement. */
7588 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7589 return bfd_reloc_overflow;
7590
7591 return bfd_reloc_ok;
7592 }
7593
7594 case R_ARM_ALU_PCREL7_0:
7595 case R_ARM_ALU_PCREL15_8:
7596 case R_ARM_ALU_PCREL23_15:
7597 {
7598 bfd_vma insn;
7599 bfd_vma relocation;
7600
7601 insn = bfd_get_32 (input_bfd, hit_data);
7602 if (globals->use_rel)
7603 {
7604 /* Extract the addend. */
7605 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7606 signed_addend = addend;
7607 }
7608 relocation = value + signed_addend;
7609
7610 relocation -= (input_section->output_section->vma
7611 + input_section->output_offset
7612 + rel->r_offset);
7613 insn = (insn & ~0xfff)
7614 | ((howto->bitpos << 7) & 0xf00)
7615 | ((relocation >> howto->bitpos) & 0xff);
7616 bfd_put_32 (input_bfd, value, hit_data);
7617 }
7618 return bfd_reloc_ok;
7619
7620 case R_ARM_GNU_VTINHERIT:
7621 case R_ARM_GNU_VTENTRY:
7622 return bfd_reloc_ok;
7623
7624 case R_ARM_GOTOFF32:
7625 /* Relocation is relative to the start of the
7626 global offset table. */
7627
7628 BFD_ASSERT (sgot != NULL);
7629 if (sgot == NULL)
7630 return bfd_reloc_notsupported;
7631
7632 /* If we are addressing a Thumb function, we need to adjust the
7633 address by one, so that attempts to call the function pointer will
7634 correctly interpret it as Thumb code. */
7635 if (sym_flags == STT_ARM_TFUNC)
7636 value += 1;
7637
7638 /* Note that sgot->output_offset is not involved in this
7639 calculation. We always want the start of .got. If we
7640 define _GLOBAL_OFFSET_TABLE in a different way, as is
7641 permitted by the ABI, we might have to change this
7642 calculation. */
7643 value -= sgot->output_section->vma;
7644 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7645 contents, rel->r_offset, value,
7646 rel->r_addend);
7647
7648 case R_ARM_GOTPC:
7649 /* Use global offset table as symbol value. */
7650 BFD_ASSERT (sgot != NULL);
7651
7652 if (sgot == NULL)
7653 return bfd_reloc_notsupported;
7654
7655 *unresolved_reloc_p = FALSE;
7656 value = sgot->output_section->vma;
7657 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7658 contents, rel->r_offset, value,
7659 rel->r_addend);
7660
7661 case R_ARM_GOT32:
7662 case R_ARM_GOT_PREL:
7663 /* Relocation is to the entry for this symbol in the
7664 global offset table. */
7665 if (sgot == NULL)
7666 return bfd_reloc_notsupported;
7667
7668 if (h != NULL)
7669 {
7670 bfd_vma off;
7671 bfd_boolean dyn;
7672
7673 off = h->got.offset;
7674 BFD_ASSERT (off != (bfd_vma) -1);
7675 dyn = globals->root.dynamic_sections_created;
7676
7677 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7678 || (info->shared
7679 && SYMBOL_REFERENCES_LOCAL (info, h))
7680 || (ELF_ST_VISIBILITY (h->other)
7681 && h->root.type == bfd_link_hash_undefweak))
7682 {
7683 /* This is actually a static link, or it is a -Bsymbolic link
7684 and the symbol is defined locally. We must initialize this
7685 entry in the global offset table. Since the offset must
7686 always be a multiple of 4, we use the least significant bit
7687 to record whether we have initialized it already.
7688
7689 When doing a dynamic link, we create a .rel(a).got relocation
7690 entry to initialize the value. This is done in the
7691 finish_dynamic_symbol routine. */
7692 if ((off & 1) != 0)
7693 off &= ~1;
7694 else
7695 {
7696 /* If we are addressing a Thumb function, we need to
7697 adjust the address by one, so that attempts to
7698 call the function pointer will correctly
7699 interpret it as Thumb code. */
7700 if (sym_flags == STT_ARM_TFUNC)
7701 value |= 1;
7702
7703 bfd_put_32 (output_bfd, value, sgot->contents + off);
7704 h->got.offset |= 1;
7705 }
7706 }
7707 else
7708 *unresolved_reloc_p = FALSE;
7709
7710 value = sgot->output_offset + off;
7711 }
7712 else
7713 {
7714 bfd_vma off;
7715
7716 BFD_ASSERT (local_got_offsets != NULL &&
7717 local_got_offsets[r_symndx] != (bfd_vma) -1);
7718
7719 off = local_got_offsets[r_symndx];
7720
7721 /* The offset must always be a multiple of 4. We use the
7722 least significant bit to record whether we have already
7723 generated the necessary reloc. */
7724 if ((off & 1) != 0)
7725 off &= ~1;
7726 else
7727 {
7728 /* If we are addressing a Thumb function, we need to
7729 adjust the address by one, so that attempts to
7730 call the function pointer will correctly
7731 interpret it as Thumb code. */
7732 if (sym_flags == STT_ARM_TFUNC)
7733 value |= 1;
7734
7735 if (globals->use_rel)
7736 bfd_put_32 (output_bfd, value, sgot->contents + off);
7737
7738 if (info->shared)
7739 {
7740 asection * srelgot;
7741 Elf_Internal_Rela outrel;
7742 bfd_byte *loc;
7743
7744 srelgot = (bfd_get_section_by_name
7745 (dynobj, RELOC_SECTION (globals, ".got")));
7746 BFD_ASSERT (srelgot != NULL);
7747
7748 outrel.r_addend = addend + value;
7749 outrel.r_offset = (sgot->output_section->vma
7750 + sgot->output_offset
7751 + off);
7752 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7753 loc = srelgot->contents;
7754 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7755 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7756 }
7757
7758 local_got_offsets[r_symndx] |= 1;
7759 }
7760
7761 value = sgot->output_offset + off;
7762 }
7763 if (r_type != R_ARM_GOT32)
7764 value += sgot->output_section->vma;
7765
7766 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7767 contents, rel->r_offset, value,
7768 rel->r_addend);
7769
7770 case R_ARM_TLS_LDO32:
7771 value = value - dtpoff_base (info);
7772
7773 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7774 contents, rel->r_offset, value,
7775 rel->r_addend);
7776
7777 case R_ARM_TLS_LDM32:
7778 {
7779 bfd_vma off;
7780
7781 if (globals->sgot == NULL)
7782 abort ();
7783
7784 off = globals->tls_ldm_got.offset;
7785
7786 if ((off & 1) != 0)
7787 off &= ~1;
7788 else
7789 {
7790 /* If we don't know the module number, create a relocation
7791 for it. */
7792 if (info->shared)
7793 {
7794 Elf_Internal_Rela outrel;
7795 bfd_byte *loc;
7796
7797 if (globals->srelgot == NULL)
7798 abort ();
7799
7800 outrel.r_addend = 0;
7801 outrel.r_offset = (globals->sgot->output_section->vma
7802 + globals->sgot->output_offset + off);
7803 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7804
7805 if (globals->use_rel)
7806 bfd_put_32 (output_bfd, outrel.r_addend,
7807 globals->sgot->contents + off);
7808
7809 loc = globals->srelgot->contents;
7810 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7811 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7812 }
7813 else
7814 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7815
7816 globals->tls_ldm_got.offset |= 1;
7817 }
7818
7819 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7820 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7821
7822 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7823 contents, rel->r_offset, value,
7824 rel->r_addend);
7825 }
7826
7827 case R_ARM_TLS_GD32:
7828 case R_ARM_TLS_IE32:
7829 {
7830 bfd_vma off;
7831 int indx;
7832 char tls_type;
7833
7834 if (globals->sgot == NULL)
7835 abort ();
7836
7837 indx = 0;
7838 if (h != NULL)
7839 {
7840 bfd_boolean dyn;
7841 dyn = globals->root.dynamic_sections_created;
7842 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7843 && (!info->shared
7844 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7845 {
7846 *unresolved_reloc_p = FALSE;
7847 indx = h->dynindx;
7848 }
7849 off = h->got.offset;
7850 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7851 }
7852 else
7853 {
7854 if (local_got_offsets == NULL)
7855 abort ();
7856 off = local_got_offsets[r_symndx];
7857 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7858 }
7859
7860 if (tls_type == GOT_UNKNOWN)
7861 abort ();
7862
7863 if ((off & 1) != 0)
7864 off &= ~1;
7865 else
7866 {
7867 bfd_boolean need_relocs = FALSE;
7868 Elf_Internal_Rela outrel;
7869 bfd_byte *loc = NULL;
7870 int cur_off = off;
7871
7872 /* The GOT entries have not been initialized yet. Do it
7873 now, and emit any relocations. If both an IE GOT and a
7874 GD GOT are necessary, we emit the GD first. */
7875
7876 if ((info->shared || indx != 0)
7877 && (h == NULL
7878 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7879 || h->root.type != bfd_link_hash_undefweak))
7880 {
7881 need_relocs = TRUE;
7882 if (globals->srelgot == NULL)
7883 abort ();
7884 loc = globals->srelgot->contents;
7885 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7886 }
7887
7888 if (tls_type & GOT_TLS_GD)
7889 {
7890 if (need_relocs)
7891 {
7892 outrel.r_addend = 0;
7893 outrel.r_offset = (globals->sgot->output_section->vma
7894 + globals->sgot->output_offset
7895 + cur_off);
7896 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7897
7898 if (globals->use_rel)
7899 bfd_put_32 (output_bfd, outrel.r_addend,
7900 globals->sgot->contents + cur_off);
7901
7902 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7903 globals->srelgot->reloc_count++;
7904 loc += RELOC_SIZE (globals);
7905
7906 if (indx == 0)
7907 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7908 globals->sgot->contents + cur_off + 4);
7909 else
7910 {
7911 outrel.r_addend = 0;
7912 outrel.r_info = ELF32_R_INFO (indx,
7913 R_ARM_TLS_DTPOFF32);
7914 outrel.r_offset += 4;
7915
7916 if (globals->use_rel)
7917 bfd_put_32 (output_bfd, outrel.r_addend,
7918 globals->sgot->contents + cur_off + 4);
7919
7920
7921 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7922 globals->srelgot->reloc_count++;
7923 loc += RELOC_SIZE (globals);
7924 }
7925 }
7926 else
7927 {
7928 /* If we are not emitting relocations for a
7929 general dynamic reference, then we must be in a
7930 static link or an executable link with the
7931 symbol binding locally. Mark it as belonging
7932 to module 1, the executable. */
7933 bfd_put_32 (output_bfd, 1,
7934 globals->sgot->contents + cur_off);
7935 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7936 globals->sgot->contents + cur_off + 4);
7937 }
7938
7939 cur_off += 8;
7940 }
7941
7942 if (tls_type & GOT_TLS_IE)
7943 {
7944 if (need_relocs)
7945 {
7946 if (indx == 0)
7947 outrel.r_addend = value - dtpoff_base (info);
7948 else
7949 outrel.r_addend = 0;
7950 outrel.r_offset = (globals->sgot->output_section->vma
7951 + globals->sgot->output_offset
7952 + cur_off);
7953 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7954
7955 if (globals->use_rel)
7956 bfd_put_32 (output_bfd, outrel.r_addend,
7957 globals->sgot->contents + cur_off);
7958
7959 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7960 globals->srelgot->reloc_count++;
7961 loc += RELOC_SIZE (globals);
7962 }
7963 else
7964 bfd_put_32 (output_bfd, tpoff (info, value),
7965 globals->sgot->contents + cur_off);
7966 cur_off += 4;
7967 }
7968
7969 if (h != NULL)
7970 h->got.offset |= 1;
7971 else
7972 local_got_offsets[r_symndx] |= 1;
7973 }
7974
7975 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7976 off += 8;
7977 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7978 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7979
7980 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7981 contents, rel->r_offset, value,
7982 rel->r_addend);
7983 }
7984
7985 case R_ARM_TLS_LE32:
7986 if (info->shared)
7987 {
7988 (*_bfd_error_handler)
7989 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7990 input_bfd, input_section,
7991 (long) rel->r_offset, howto->name);
7992 return FALSE;
7993 }
7994 else
7995 value = tpoff (info, value);
7996
7997 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7998 contents, rel->r_offset, value,
7999 rel->r_addend);
8000
8001 case R_ARM_V4BX:
8002 if (globals->fix_v4bx)
8003 {
8004 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8005
8006 /* Ensure that we have a BX instruction. */
8007 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8008
8009 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8010 {
8011 /* Branch to veneer. */
8012 bfd_vma glue_addr;
8013 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8014 glue_addr -= input_section->output_section->vma
8015 + input_section->output_offset
8016 + rel->r_offset + 8;
8017 insn = (insn & 0xf0000000) | 0x0a000000
8018 | ((glue_addr >> 2) & 0x00ffffff);
8019 }
8020 else
8021 {
8022 /* Preserve Rm (lowest four bits) and the condition code
8023 (highest four bits). Other bits encode MOV PC,Rm. */
8024 insn = (insn & 0xf000000f) | 0x01a0f000;
8025 }
8026
8027 bfd_put_32 (input_bfd, insn, hit_data);
8028 }
8029 return bfd_reloc_ok;
8030
8031 case R_ARM_MOVW_ABS_NC:
8032 case R_ARM_MOVT_ABS:
8033 case R_ARM_MOVW_PREL_NC:
8034 case R_ARM_MOVT_PREL:
8035 /* Until we properly support segment-base-relative addressing then
8036 we assume the segment base to be zero, as for the group relocations.
8037 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8038 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8039 case R_ARM_MOVW_BREL_NC:
8040 case R_ARM_MOVW_BREL:
8041 case R_ARM_MOVT_BREL:
8042 {
8043 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8044
8045 if (globals->use_rel)
8046 {
8047 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8048 signed_addend = (addend ^ 0x8000) - 0x8000;
8049 }
8050
8051 value += signed_addend;
8052
8053 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8054 value -= (input_section->output_section->vma
8055 + input_section->output_offset + rel->r_offset);
8056
8057 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8058 return bfd_reloc_overflow;
8059
8060 if (sym_flags == STT_ARM_TFUNC)
8061 value |= 1;
8062
8063 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8064 || r_type == R_ARM_MOVT_BREL)
8065 value >>= 16;
8066
8067 insn &= 0xfff0f000;
8068 insn |= value & 0xfff;
8069 insn |= (value & 0xf000) << 4;
8070 bfd_put_32 (input_bfd, insn, hit_data);
8071 }
8072 return bfd_reloc_ok;
8073
8074 case R_ARM_THM_MOVW_ABS_NC:
8075 case R_ARM_THM_MOVT_ABS:
8076 case R_ARM_THM_MOVW_PREL_NC:
8077 case R_ARM_THM_MOVT_PREL:
8078 /* Until we properly support segment-base-relative addressing then
8079 we assume the segment base to be zero, as for the above relocations.
8080 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8081 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8082 as R_ARM_THM_MOVT_ABS. */
8083 case R_ARM_THM_MOVW_BREL_NC:
8084 case R_ARM_THM_MOVW_BREL:
8085 case R_ARM_THM_MOVT_BREL:
8086 {
8087 bfd_vma insn;
8088
8089 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8090 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8091
8092 if (globals->use_rel)
8093 {
8094 addend = ((insn >> 4) & 0xf000)
8095 | ((insn >> 15) & 0x0800)
8096 | ((insn >> 4) & 0x0700)
8097 | (insn & 0x00ff);
8098 signed_addend = (addend ^ 0x8000) - 0x8000;
8099 }
8100
8101 value += signed_addend;
8102
8103 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8104 value -= (input_section->output_section->vma
8105 + input_section->output_offset + rel->r_offset);
8106
8107 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8108 return bfd_reloc_overflow;
8109
8110 if (sym_flags == STT_ARM_TFUNC)
8111 value |= 1;
8112
8113 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8114 || r_type == R_ARM_THM_MOVT_BREL)
8115 value >>= 16;
8116
8117 insn &= 0xfbf08f00;
8118 insn |= (value & 0xf000) << 4;
8119 insn |= (value & 0x0800) << 15;
8120 insn |= (value & 0x0700) << 4;
8121 insn |= (value & 0x00ff);
8122
8123 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8124 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8125 }
8126 return bfd_reloc_ok;
8127
8128 case R_ARM_ALU_PC_G0_NC:
8129 case R_ARM_ALU_PC_G1_NC:
8130 case R_ARM_ALU_PC_G0:
8131 case R_ARM_ALU_PC_G1:
8132 case R_ARM_ALU_PC_G2:
8133 case R_ARM_ALU_SB_G0_NC:
8134 case R_ARM_ALU_SB_G1_NC:
8135 case R_ARM_ALU_SB_G0:
8136 case R_ARM_ALU_SB_G1:
8137 case R_ARM_ALU_SB_G2:
8138 {
8139 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8140 bfd_vma pc = input_section->output_section->vma
8141 + input_section->output_offset + rel->r_offset;
8142 /* sb should be the origin of the *segment* containing the symbol.
8143 It is not clear how to obtain this OS-dependent value, so we
8144 make an arbitrary choice of zero. */
8145 bfd_vma sb = 0;
8146 bfd_vma residual;
8147 bfd_vma g_n;
8148 bfd_signed_vma signed_value;
8149 int group = 0;
8150
8151 /* Determine which group of bits to select. */
8152 switch (r_type)
8153 {
8154 case R_ARM_ALU_PC_G0_NC:
8155 case R_ARM_ALU_PC_G0:
8156 case R_ARM_ALU_SB_G0_NC:
8157 case R_ARM_ALU_SB_G0:
8158 group = 0;
8159 break;
8160
8161 case R_ARM_ALU_PC_G1_NC:
8162 case R_ARM_ALU_PC_G1:
8163 case R_ARM_ALU_SB_G1_NC:
8164 case R_ARM_ALU_SB_G1:
8165 group = 1;
8166 break;
8167
8168 case R_ARM_ALU_PC_G2:
8169 case R_ARM_ALU_SB_G2:
8170 group = 2;
8171 break;
8172
8173 default:
8174 abort ();
8175 }
8176
8177 /* If REL, extract the addend from the insn. If RELA, it will
8178 have already been fetched for us. */
8179 if (globals->use_rel)
8180 {
8181 int negative;
8182 bfd_vma constant = insn & 0xff;
8183 bfd_vma rotation = (insn & 0xf00) >> 8;
8184
8185 if (rotation == 0)
8186 signed_addend = constant;
8187 else
8188 {
8189 /* Compensate for the fact that in the instruction, the
8190 rotation is stored in multiples of 2 bits. */
8191 rotation *= 2;
8192
8193 /* Rotate "constant" right by "rotation" bits. */
8194 signed_addend = (constant >> rotation) |
8195 (constant << (8 * sizeof (bfd_vma) - rotation));
8196 }
8197
8198 /* Determine if the instruction is an ADD or a SUB.
8199 (For REL, this determines the sign of the addend.) */
8200 negative = identify_add_or_sub (insn);
8201 if (negative == 0)
8202 {
8203 (*_bfd_error_handler)
8204 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8205 input_bfd, input_section,
8206 (long) rel->r_offset, howto->name);
8207 return bfd_reloc_overflow;
8208 }
8209
8210 signed_addend *= negative;
8211 }
8212
8213 /* Compute the value (X) to go in the place. */
8214 if (r_type == R_ARM_ALU_PC_G0_NC
8215 || r_type == R_ARM_ALU_PC_G1_NC
8216 || r_type == R_ARM_ALU_PC_G0
8217 || r_type == R_ARM_ALU_PC_G1
8218 || r_type == R_ARM_ALU_PC_G2)
8219 /* PC relative. */
8220 signed_value = value - pc + signed_addend;
8221 else
8222 /* Section base relative. */
8223 signed_value = value - sb + signed_addend;
8224
8225 /* If the target symbol is a Thumb function, then set the
8226 Thumb bit in the address. */
8227 if (sym_flags == STT_ARM_TFUNC)
8228 signed_value |= 1;
8229
8230 /* Calculate the value of the relevant G_n, in encoded
8231 constant-with-rotation format. */
8232 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8233 &residual);
8234
8235 /* Check for overflow if required. */
8236 if ((r_type == R_ARM_ALU_PC_G0
8237 || r_type == R_ARM_ALU_PC_G1
8238 || r_type == R_ARM_ALU_PC_G2
8239 || r_type == R_ARM_ALU_SB_G0
8240 || r_type == R_ARM_ALU_SB_G1
8241 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8242 {
8243 (*_bfd_error_handler)
8244 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8245 input_bfd, input_section,
8246 (long) rel->r_offset, abs (signed_value), howto->name);
8247 return bfd_reloc_overflow;
8248 }
8249
8250 /* Mask out the value and the ADD/SUB part of the opcode; take care
8251 not to destroy the S bit. */
8252 insn &= 0xff1ff000;
8253
8254 /* Set the opcode according to whether the value to go in the
8255 place is negative. */
8256 if (signed_value < 0)
8257 insn |= 1 << 22;
8258 else
8259 insn |= 1 << 23;
8260
8261 /* Encode the offset. */
8262 insn |= g_n;
8263
8264 bfd_put_32 (input_bfd, insn, hit_data);
8265 }
8266 return bfd_reloc_ok;
8267
8268 case R_ARM_LDR_PC_G0:
8269 case R_ARM_LDR_PC_G1:
8270 case R_ARM_LDR_PC_G2:
8271 case R_ARM_LDR_SB_G0:
8272 case R_ARM_LDR_SB_G1:
8273 case R_ARM_LDR_SB_G2:
8274 {
8275 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8276 bfd_vma pc = input_section->output_section->vma
8277 + input_section->output_offset + rel->r_offset;
8278 bfd_vma sb = 0; /* See note above. */
8279 bfd_vma residual;
8280 bfd_signed_vma signed_value;
8281 int group = 0;
8282
8283 /* Determine which groups of bits to calculate. */
8284 switch (r_type)
8285 {
8286 case R_ARM_LDR_PC_G0:
8287 case R_ARM_LDR_SB_G0:
8288 group = 0;
8289 break;
8290
8291 case R_ARM_LDR_PC_G1:
8292 case R_ARM_LDR_SB_G1:
8293 group = 1;
8294 break;
8295
8296 case R_ARM_LDR_PC_G2:
8297 case R_ARM_LDR_SB_G2:
8298 group = 2;
8299 break;
8300
8301 default:
8302 abort ();
8303 }
8304
8305 /* If REL, extract the addend from the insn. If RELA, it will
8306 have already been fetched for us. */
8307 if (globals->use_rel)
8308 {
8309 int negative = (insn & (1 << 23)) ? 1 : -1;
8310 signed_addend = negative * (insn & 0xfff);
8311 }
8312
8313 /* Compute the value (X) to go in the place. */
8314 if (r_type == R_ARM_LDR_PC_G0
8315 || r_type == R_ARM_LDR_PC_G1
8316 || r_type == R_ARM_LDR_PC_G2)
8317 /* PC relative. */
8318 signed_value = value - pc + signed_addend;
8319 else
8320 /* Section base relative. */
8321 signed_value = value - sb + signed_addend;
8322
8323 /* Calculate the value of the relevant G_{n-1} to obtain
8324 the residual at that stage. */
8325 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8326
8327 /* Check for overflow. */
8328 if (residual >= 0x1000)
8329 {
8330 (*_bfd_error_handler)
8331 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8332 input_bfd, input_section,
8333 (long) rel->r_offset, abs (signed_value), howto->name);
8334 return bfd_reloc_overflow;
8335 }
8336
8337 /* Mask out the value and U bit. */
8338 insn &= 0xff7ff000;
8339
8340 /* Set the U bit if the value to go in the place is non-negative. */
8341 if (signed_value >= 0)
8342 insn |= 1 << 23;
8343
8344 /* Encode the offset. */
8345 insn |= residual;
8346
8347 bfd_put_32 (input_bfd, insn, hit_data);
8348 }
8349 return bfd_reloc_ok;
8350
8351 case R_ARM_LDRS_PC_G0:
8352 case R_ARM_LDRS_PC_G1:
8353 case R_ARM_LDRS_PC_G2:
8354 case R_ARM_LDRS_SB_G0:
8355 case R_ARM_LDRS_SB_G1:
8356 case R_ARM_LDRS_SB_G2:
8357 {
8358 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8359 bfd_vma pc = input_section->output_section->vma
8360 + input_section->output_offset + rel->r_offset;
8361 bfd_vma sb = 0; /* See note above. */
8362 bfd_vma residual;
8363 bfd_signed_vma signed_value;
8364 int group = 0;
8365
8366 /* Determine which groups of bits to calculate. */
8367 switch (r_type)
8368 {
8369 case R_ARM_LDRS_PC_G0:
8370 case R_ARM_LDRS_SB_G0:
8371 group = 0;
8372 break;
8373
8374 case R_ARM_LDRS_PC_G1:
8375 case R_ARM_LDRS_SB_G1:
8376 group = 1;
8377 break;
8378
8379 case R_ARM_LDRS_PC_G2:
8380 case R_ARM_LDRS_SB_G2:
8381 group = 2;
8382 break;
8383
8384 default:
8385 abort ();
8386 }
8387
8388 /* If REL, extract the addend from the insn. If RELA, it will
8389 have already been fetched for us. */
8390 if (globals->use_rel)
8391 {
8392 int negative = (insn & (1 << 23)) ? 1 : -1;
8393 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8394 }
8395
8396 /* Compute the value (X) to go in the place. */
8397 if (r_type == R_ARM_LDRS_PC_G0
8398 || r_type == R_ARM_LDRS_PC_G1
8399 || r_type == R_ARM_LDRS_PC_G2)
8400 /* PC relative. */
8401 signed_value = value - pc + signed_addend;
8402 else
8403 /* Section base relative. */
8404 signed_value = value - sb + signed_addend;
8405
8406 /* Calculate the value of the relevant G_{n-1} to obtain
8407 the residual at that stage. */
8408 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8409
8410 /* Check for overflow. */
8411 if (residual >= 0x100)
8412 {
8413 (*_bfd_error_handler)
8414 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8415 input_bfd, input_section,
8416 (long) rel->r_offset, abs (signed_value), howto->name);
8417 return bfd_reloc_overflow;
8418 }
8419
8420 /* Mask out the value and U bit. */
8421 insn &= 0xff7ff0f0;
8422
8423 /* Set the U bit if the value to go in the place is non-negative. */
8424 if (signed_value >= 0)
8425 insn |= 1 << 23;
8426
8427 /* Encode the offset. */
8428 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8429
8430 bfd_put_32 (input_bfd, insn, hit_data);
8431 }
8432 return bfd_reloc_ok;
8433
8434 case R_ARM_LDC_PC_G0:
8435 case R_ARM_LDC_PC_G1:
8436 case R_ARM_LDC_PC_G2:
8437 case R_ARM_LDC_SB_G0:
8438 case R_ARM_LDC_SB_G1:
8439 case R_ARM_LDC_SB_G2:
8440 {
8441 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8442 bfd_vma pc = input_section->output_section->vma
8443 + input_section->output_offset + rel->r_offset;
8444 bfd_vma sb = 0; /* See note above. */
8445 bfd_vma residual;
8446 bfd_signed_vma signed_value;
8447 int group = 0;
8448
8449 /* Determine which groups of bits to calculate. */
8450 switch (r_type)
8451 {
8452 case R_ARM_LDC_PC_G0:
8453 case R_ARM_LDC_SB_G0:
8454 group = 0;
8455 break;
8456
8457 case R_ARM_LDC_PC_G1:
8458 case R_ARM_LDC_SB_G1:
8459 group = 1;
8460 break;
8461
8462 case R_ARM_LDC_PC_G2:
8463 case R_ARM_LDC_SB_G2:
8464 group = 2;
8465 break;
8466
8467 default:
8468 abort ();
8469 }
8470
8471 /* If REL, extract the addend from the insn. If RELA, it will
8472 have already been fetched for us. */
8473 if (globals->use_rel)
8474 {
8475 int negative = (insn & (1 << 23)) ? 1 : -1;
8476 signed_addend = negative * ((insn & 0xff) << 2);
8477 }
8478
8479 /* Compute the value (X) to go in the place. */
8480 if (r_type == R_ARM_LDC_PC_G0
8481 || r_type == R_ARM_LDC_PC_G1
8482 || r_type == R_ARM_LDC_PC_G2)
8483 /* PC relative. */
8484 signed_value = value - pc + signed_addend;
8485 else
8486 /* Section base relative. */
8487 signed_value = value - sb + signed_addend;
8488
8489 /* Calculate the value of the relevant G_{n-1} to obtain
8490 the residual at that stage. */
8491 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8492
8493 /* Check for overflow. (The absolute value to go in the place must be
8494 divisible by four and, after having been divided by four, must
8495 fit in eight bits.) */
8496 if ((residual & 0x3) != 0 || residual >= 0x400)
8497 {
8498 (*_bfd_error_handler)
8499 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8500 input_bfd, input_section,
8501 (long) rel->r_offset, abs (signed_value), howto->name);
8502 return bfd_reloc_overflow;
8503 }
8504
8505 /* Mask out the value and U bit. */
8506 insn &= 0xff7fff00;
8507
8508 /* Set the U bit if the value to go in the place is non-negative. */
8509 if (signed_value >= 0)
8510 insn |= 1 << 23;
8511
8512 /* Encode the offset. */
8513 insn |= residual >> 2;
8514
8515 bfd_put_32 (input_bfd, insn, hit_data);
8516 }
8517 return bfd_reloc_ok;
8518
8519 default:
8520 return bfd_reloc_notsupported;
8521 }
8522 }
8523
8524 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8525 static void
8526 arm_add_to_rel (bfd * abfd,
8527 bfd_byte * address,
8528 reloc_howto_type * howto,
8529 bfd_signed_vma increment)
8530 {
8531 bfd_signed_vma addend;
8532
8533 if (howto->type == R_ARM_THM_CALL
8534 || howto->type == R_ARM_THM_JUMP24)
8535 {
8536 int upper_insn, lower_insn;
8537 int upper, lower;
8538
8539 upper_insn = bfd_get_16 (abfd, address);
8540 lower_insn = bfd_get_16 (abfd, address + 2);
8541 upper = upper_insn & 0x7ff;
8542 lower = lower_insn & 0x7ff;
8543
8544 addend = (upper << 12) | (lower << 1);
8545 addend += increment;
8546 addend >>= 1;
8547
8548 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8549 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8550
8551 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8552 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8553 }
8554 else
8555 {
8556 bfd_vma contents;
8557
8558 contents = bfd_get_32 (abfd, address);
8559
8560 /* Get the (signed) value from the instruction. */
8561 addend = contents & howto->src_mask;
8562 if (addend & ((howto->src_mask + 1) >> 1))
8563 {
8564 bfd_signed_vma mask;
8565
8566 mask = -1;
8567 mask &= ~ howto->src_mask;
8568 addend |= mask;
8569 }
8570
8571 /* Add in the increment, (which is a byte value). */
8572 switch (howto->type)
8573 {
8574 default:
8575 addend += increment;
8576 break;
8577
8578 case R_ARM_PC24:
8579 case R_ARM_PLT32:
8580 case R_ARM_CALL:
8581 case R_ARM_JUMP24:
8582 addend <<= howto->size;
8583 addend += increment;
8584
8585 /* Should we check for overflow here ? */
8586
8587 /* Drop any undesired bits. */
8588 addend >>= howto->rightshift;
8589 break;
8590 }
8591
8592 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8593
8594 bfd_put_32 (abfd, contents, address);
8595 }
8596 }
8597
/* Return non-zero if relocation type R_TYPE is one of the ARM TLS
   relocations; used below to cross-check relocation types against
   STT_TLS symbol types.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32)
8607
/* Relocate an ARM ELF section.

   Apply each relocation in RELOCS to the section data in CONTENTS
   (belonging to INPUT_SECTION of INPUT_BFD).  Local symbols are resolved
   via LOCAL_SYMS/LOCAL_SECTIONS; globals via the linker hash table.
   Returns FALSE after reporting an error, TRUE otherwise.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map the relocation number through any backend-specific
	 renumbering (see arm_real_reloc_type).  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT relocs carry garbage-collection information
	 only; they patch nothing.  */
      if (   r_type == R_ARM_GNU_VTENTRY
          || r_type == R_ARM_GNU_VTINHERIT)
        continue;

      bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
      howto = bfd_reloc.howto;

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* A local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];
	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!info->relocatable
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  /* In REL format the addend lives in the instruction;
		     extract it so the offset into the merged section can
		     be remapped and written back below.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple relocation formats (contiguous mask,
			 no shift) can be handled generically here.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  (*_bfd_error_handler)
			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (long) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* A global symbol: resolve it through the linker hash table.  */
	  bfd_boolean warned;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned);

	  sym_type = h->type;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol (or section) name for diagnostics below.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (input_bfd, sec);
	}

      /* Complain when a TLS relocation is paired with a non-TLS symbol
	 or vice versa.  */
      if (r_symndx != 0
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  (*_bfd_error_handler)
	    ((sym_type == STT_TLS
	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     name);
	}

      /* Perform the actual relocation.  */
      r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					 input_section, contents, rel,
					 relocation, info, sec, name,
					 (h ? ELF_ST_TYPE (h->type) :
					  ELF_ST_TYPE (sym->st_info)), h,
					 &unresolved_reloc, &error_message);

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic))
	{
	  (*_bfd_error_handler)
	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (long) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  /* Report the failure via the appropriate linker callback.  */
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if ((! h ||
		   h->root.type != bfd_link_hash_undefined)
		  && (!((*info->callbacks->reloc_overflow)
			(info, (h ? &h->root : NULL), name, howto->name,
			 (bfd_vma) 0, input_bfd, input_section,
			 rel->r_offset))))
		  return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      if (!((*info->callbacks->reloc_dangerous)
		    (info, error_message, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  return TRUE;
}
8910
8911 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8912 adds the edit to the start of the list. (The list must be built in order of
8913 ascending INDEX: the function's callers are primarily responsible for
8914 maintaining that condition). */
8915
8916 static void
8917 add_unwind_table_edit (arm_unwind_table_edit **head,
8918 arm_unwind_table_edit **tail,
8919 arm_unwind_edit_type type,
8920 asection *linked_section,
8921 unsigned int index)
8922 {
8923 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8924
8925 new_edit->type = type;
8926 new_edit->linked_section = linked_section;
8927 new_edit->index = index;
8928
8929 if (index > 0)
8930 {
8931 new_edit->next = NULL;
8932
8933 if (*tail)
8934 (*tail)->next = new_edit;
8935
8936 (*tail) = new_edit;
8937
8938 if (!*head)
8939 (*head) = new_edit;
8940 }
8941 else
8942 {
8943 new_edit->next = *head;
8944
8945 if (!*tail)
8946 *tail = new_edit;
8947
8948 *head = new_edit;
8949 }
8950 }
8951
8952 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8953
8954 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
8955 static void
8956 adjust_exidx_size(asection *exidx_sec, int adjust)
8957 {
8958 asection *out_sec;
8959
8960 if (!exidx_sec->rawsize)
8961 exidx_sec->rawsize = exidx_sec->size;
8962
8963 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8964 out_sec = exidx_sec->output_section;
8965 /* Adjust size of output section. */
8966 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
8967 }
8968
8969 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8970 static void
8971 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8972 {
8973 struct _arm_elf_section_data *exidx_arm_data;
8974
8975 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8976 add_unwind_table_edit (
8977 &exidx_arm_data->u.exidx.unwind_edit_list,
8978 &exidx_arm_data->u.exidx.unwind_edit_tail,
8979 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8980
8981 adjust_exidx_size(exidx_sec, 8);
8982 }
8983
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   The edits are applied when the tables are written
   (in elf32_arm_write_section).

   TEXT_SECTION_ORDER holds NUM_TEXT_SECTIONS text sections; it is expected
   to be sorted in ascending order of VMA -- TODO confirm with callers.
   Always returns TRUE.  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      /* Each index table entry is a pair of words; the second word
	 distinguishes CANTUNWIND / inlined / out-of-line unwind data.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide)
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (last_exidx_sec && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return TRUE;
}
9153
9154 static bfd_boolean
9155 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9156 bfd *ibfd, const char *name)
9157 {
9158 asection *sec, *osec;
9159
9160 sec = bfd_get_section_by_name (ibfd, name);
9161 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9162 return TRUE;
9163
9164 osec = sec->output_section;
9165 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9166 return TRUE;
9167
9168 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9169 sec->output_offset, sec->size))
9170 return FALSE;
9171
9172 return TRUE;
9173 }
9174
9175 static bfd_boolean
9176 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9177 {
9178 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9179
9180 /* Invoke the regular ELF backend linker to do all the work. */
9181 if (!bfd_elf_final_link (abfd, info))
9182 return FALSE;
9183
9184 /* Write out any glue sections now that we have created all the
9185 stubs. */
9186 if (globals->bfd_of_glue_owner != NULL)
9187 {
9188 if (! elf32_arm_output_glue_section (info, abfd,
9189 globals->bfd_of_glue_owner,
9190 ARM2THUMB_GLUE_SECTION_NAME))
9191 return FALSE;
9192
9193 if (! elf32_arm_output_glue_section (info, abfd,
9194 globals->bfd_of_glue_owner,
9195 THUMB2ARM_GLUE_SECTION_NAME))
9196 return FALSE;
9197
9198 if (! elf32_arm_output_glue_section (info, abfd,
9199 globals->bfd_of_glue_owner,
9200 VFP11_ERRATUM_VENEER_SECTION_NAME))
9201 return FALSE;
9202
9203 if (! elf32_arm_output_glue_section (info, abfd,
9204 globals->bfd_of_glue_owner,
9205 ARM_BX_GLUE_SECTION_NAME))
9206 return FALSE;
9207 }
9208
9209 return TRUE;
9210 }
9211
9212 /* Set the right machine number. */
9213
9214 static bfd_boolean
9215 elf32_arm_object_p (bfd *abfd)
9216 {
9217 unsigned int mach;
9218
9219 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9220
9221 if (mach != bfd_mach_arm_unknown)
9222 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9223
9224 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9225 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9226
9227 else
9228 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9229
9230 return TRUE;
9231 }
9232
9233 /* Function to keep ARM specific flags in the ELF header. */
9234
9235 static bfd_boolean
9236 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9237 {
9238 if (elf_flags_init (abfd)
9239 && elf_elfheader (abfd)->e_flags != flags)
9240 {
9241 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9242 {
9243 if (flags & EF_ARM_INTERWORK)
9244 (*_bfd_error_handler)
9245 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9246 abfd);
9247 else
9248 _bfd_error_handler
9249 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9250 abfd);
9251 }
9252 }
9253 else
9254 {
9255 elf_elfheader (abfd)->e_flags = flags;
9256 elf_flags_init (abfd) = TRUE;
9257 }
9258
9259 return TRUE;
9260 }
9261
9262 /* Copy backend specific data from one object module to another. */
9263
9264 static bfd_boolean
9265 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9266 {
9267 flagword in_flags;
9268 flagword out_flags;
9269
9270 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9271 return TRUE;
9272
9273 in_flags = elf_elfheader (ibfd)->e_flags;
9274 out_flags = elf_elfheader (obfd)->e_flags;
9275
9276 if (elf_flags_init (obfd)
9277 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9278 && in_flags != out_flags)
9279 {
9280 /* Cannot mix APCS26 and APCS32 code. */
9281 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9282 return FALSE;
9283
9284 /* Cannot mix float APCS and non-float APCS code. */
9285 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9286 return FALSE;
9287
9288 /* If the src and dest have different interworking flags
9289 then turn off the interworking bit. */
9290 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9291 {
9292 if (out_flags & EF_ARM_INTERWORK)
9293 _bfd_error_handler
9294 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9295 obfd, ibfd);
9296
9297 in_flags &= ~EF_ARM_INTERWORK;
9298 }
9299
9300 /* Likewise for PIC, though don't warn for this case. */
9301 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9302 in_flags &= ~EF_ARM_PIC;
9303 }
9304
9305 elf_elfheader (obfd)->e_flags = in_flags;
9306 elf_flags_init (obfd) = TRUE;
9307
9308 /* Also copy the EI_OSABI field. */
9309 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9310 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9311
9312 /* Copy object attributes. */
9313 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9314
9315 return TRUE;
9316 }
9317
/* Values for Tag_ABI_PCS_R9_use (how register R9 is used); see the
   ARM ABI addenda for the authoritative definitions.  */
enum
{
  AEABI_R9_V6,		/* R9 is just another callee-saved register (V6).  */
  AEABI_R9_SB,		/* R9 is the static base (SB) register.  */
  AEABI_R9_TLS,		/* R9 holds the thread-local storage pointer.  */
  AEABI_R9_unused	/* R9 is not used at all.  */
};

/* Values for Tag_ABI_PCS_RW_data (how writable static data is
   addressed).  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* Addressed with absolute addresses.  */
  AEABI_PCS_RW_data_PCrel,	/* Addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* Addressed relative to the static base.  */
  AEABI_PCS_RW_data_unused	/* No permanent RW static data used.  */
};

/* Values for Tag_ABI_enum_size (size of enumerated types).  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums packed into smallest container.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits wide.  */
  AEABI_enum_forced_wide	/* Enums forced to 32 bits everywhere.  */
};
9344
9345 /* Determine whether an object attribute tag takes an integer, a
9346 string or both. */
9347
9348 static int
9349 elf32_arm_obj_attrs_arg_type (int tag)
9350 {
9351 if (tag == Tag_compatibility)
9352 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9353 else if (tag == Tag_nodefaults)
9354 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9355 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9356 return ATTR_TYPE_FLAG_STR_VAL;
9357 else if (tag < 32)
9358 return ATTR_TYPE_FLAG_INT_VAL;
9359 else
9360 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9361 }
9362
9363 /* The ABI defines that Tag_conformance should be emitted first, and that
9364 Tag_nodefaults should be second (if either is defined). This sets those
9365 two positions, and bumps up the position of all the remaining tags to
9366 compensate. */
9367 static int
9368 elf32_arm_obj_attrs_order (int num)
9369 {
9370 if (num == 4)
9371 return Tag_conformance;
9372 if (num == 5)
9373 return Tag_nodefaults;
9374 if ((num - 2) < Tag_nodefaults)
9375 return num - 2;
9376 if ((num - 1) < Tag_conformance)
9377 return num - 1;
9378 return num;
9379 }
9380
9381 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9382 Returns -1 if no architecture could be read. */
9383
9384 static int
9385 get_secondary_compatible_arch (bfd *abfd)
9386 {
9387 obj_attribute *attr =
9388 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9389
9390 /* Note: the tag and its argument below are uleb128 values, though
9391 currently-defined values fit in one byte for each. */
9392 if (attr->s
9393 && attr->s[0] == Tag_CPU_arch
9394 && (attr->s[1] & 128) != 128
9395 && attr->s[2] == 0)
9396 return attr->s[1];
9397
9398 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9399 return -1;
9400 }
9401
9402 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9403 The tag is removed if ARCH is -1. */
9404
9405 static void
9406 set_secondary_compatible_arch (bfd *abfd, int arch)
9407 {
9408 obj_attribute *attr =
9409 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9410
9411 if (arch == -1)
9412 {
9413 attr->s = NULL;
9414 return;
9415 }
9416
9417 /* Note: the tag and its argument below are uleb128 values, though
9418 currently-defined values fit in one byte for each. */
9419 if (!attr->s)
9420 attr->s = bfd_alloc (abfd, 3);
9421 attr->s[0] = Tag_CPU_arch;
9422 attr->s[1] = arch;
9423 attr->s[2] = '\0';
9424 }
9425
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.

   IBFD is the input bfd, used only for error reporting.  OLDTAG is the
   accumulated output Tag_CPU_arch value; NEWTAG is the incoming one.
   SECONDARY_COMPAT / *SECONDARY_COMPAT_OUT are the architectures read from
   the input's and output's Tag_also_compatible_with attributes (-1 if
   absent); *SECONDARY_COMPAT_OUT is updated with the merged secondary
   architecture.  Returns the merged Tag_CPU_arch value, or -1 after
   reporting an error.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
                      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each vNN table below gives the merge result of architecture vNN
     (the higher of the two tags) against each lower-or-equal tag,
     indexed by that lower tag.  -1 marks an unmergeable pair.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V4T),    /* V4T.  */
      T(V5T),    /* V5T.  */
      T(V5TE),   /* V5TE.  */
      T(V5TEJ),  /* V5TEJ.  */
      T(V6),     /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V6T2),   /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M),   /* V6_M.  */
      T(V6S_M),  /* V6S_M.  */
      T(V4T_PLUS_V6_M)  /* V4T plus V6_M.  */
    };
  /* Row index is tagh - T(V6T2): tables only exist for the architectures
     past the monotonic prefix (see the V6KZ early-return below).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)][tagl];

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
                          ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
9583
9584 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9585 are conflicting attributes. */
9586
9587 static bfd_boolean
9588 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9589 {
9590 obj_attribute *in_attr;
9591 obj_attribute *out_attr;
9592 obj_attribute_list *in_list;
9593 obj_attribute_list *out_list;
9594 obj_attribute_list **out_listp;
9595 /* Some tags have 0 = don't care, 1 = strong requirement,
9596 2 = weak requirement. */
9597 static const int order_021[3] = {0, 2, 1};
9598 /* For use with Tag_VFP_arch. */
9599 static const int order_01243[5] = {0, 1, 2, 4, 3};
9600 int i;
9601 bfd_boolean result = TRUE;
9602
9603 /* Skip the linker stubs file. This preserves previous behavior
9604 of accepting unknown attributes in the first input file - but
9605 is that a bug? */
9606 if (ibfd->flags & BFD_LINKER_CREATED)
9607 return TRUE;
9608
9609 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9610 {
9611 /* This is the first object. Copy the attributes. */
9612 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9613
9614 /* Use the Tag_null value to indicate the attributes have been
9615 initialized. */
9616 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9617
9618 return TRUE;
9619 }
9620
9621 in_attr = elf_known_obj_attributes_proc (ibfd);
9622 out_attr = elf_known_obj_attributes_proc (obfd);
9623 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9624 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9625 {
9626 /* Ignore mismatches if the object doesn't use floating point. */
9627 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9628 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9629 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9630 {
9631 _bfd_error_handler
9632 (_("error: %B uses VFP register arguments, %B does not"),
9633 ibfd, obfd);
9634 result = FALSE;
9635 }
9636 }
9637
9638 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9639 {
9640 /* Merge this attribute with existing attributes. */
9641 switch (i)
9642 {
9643 case Tag_CPU_raw_name:
9644 case Tag_CPU_name:
9645 /* These are merged after Tag_CPU_arch. */
9646 break;
9647
9648 case Tag_ABI_optimization_goals:
9649 case Tag_ABI_FP_optimization_goals:
9650 /* Use the first value seen. */
9651 break;
9652
9653 case Tag_CPU_arch:
9654 {
9655 int secondary_compat = -1, secondary_compat_out = -1;
9656 unsigned int saved_out_attr = out_attr[i].i;
9657 static const char *name_table[] = {
9658 /* These aren't real CPU names, but we can't guess
9659 that from the architecture version alone. */
9660 "Pre v4",
9661 "ARM v4",
9662 "ARM v4T",
9663 "ARM v5T",
9664 "ARM v5TE",
9665 "ARM v5TEJ",
9666 "ARM v6",
9667 "ARM v6KZ",
9668 "ARM v6T2",
9669 "ARM v6K",
9670 "ARM v7",
9671 "ARM v6-M",
9672 "ARM v6S-M"
9673 };
9674
9675 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9676 secondary_compat = get_secondary_compatible_arch (ibfd);
9677 secondary_compat_out = get_secondary_compatible_arch (obfd);
9678 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9679 &secondary_compat_out,
9680 in_attr[i].i,
9681 secondary_compat);
9682 set_secondary_compatible_arch (obfd, secondary_compat_out);
9683
9684 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9685 if (out_attr[i].i == saved_out_attr)
9686 ; /* Leave the names alone. */
9687 else if (out_attr[i].i == in_attr[i].i)
9688 {
9689 /* The output architecture has been changed to match the
9690 input architecture. Use the input names. */
9691 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9692 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9693 : NULL;
9694 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9695 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9696 : NULL;
9697 }
9698 else
9699 {
9700 out_attr[Tag_CPU_name].s = NULL;
9701 out_attr[Tag_CPU_raw_name].s = NULL;
9702 }
9703
9704 /* If we still don't have a value for Tag_CPU_name,
9705 make one up now. Tag_CPU_raw_name remains blank. */
9706 if (out_attr[Tag_CPU_name].s == NULL
9707 && out_attr[i].i < ARRAY_SIZE (name_table))
9708 out_attr[Tag_CPU_name].s =
9709 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9710 }
9711 break;
9712
9713 case Tag_ARM_ISA_use:
9714 case Tag_THUMB_ISA_use:
9715 case Tag_WMMX_arch:
9716 case Tag_Advanced_SIMD_arch:
9717 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9718 case Tag_ABI_FP_rounding:
9719 case Tag_ABI_FP_exceptions:
9720 case Tag_ABI_FP_user_exceptions:
9721 case Tag_ABI_FP_number_model:
9722 case Tag_VFP_HP_extension:
9723 case Tag_CPU_unaligned_access:
9724 case Tag_T2EE_use:
9725 case Tag_Virtualization_use:
9726 case Tag_MPextension_use:
9727 /* Use the largest value specified. */
9728 if (in_attr[i].i > out_attr[i].i)
9729 out_attr[i].i = in_attr[i].i;
9730 break;
9731
9732 case Tag_ABI_align8_preserved:
9733 case Tag_ABI_PCS_RO_data:
9734 /* Use the smallest value specified. */
9735 if (in_attr[i].i < out_attr[i].i)
9736 out_attr[i].i = in_attr[i].i;
9737 break;
9738
9739 case Tag_ABI_align8_needed:
9740 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9741 && (in_attr[Tag_ABI_align8_preserved].i == 0
9742 || out_attr[Tag_ABI_align8_preserved].i == 0))
9743 {
9744 /* This error message should be enabled once all non-conformant
9745 binaries in the toolchain have had the attributes set
9746 properly.
9747 _bfd_error_handler
9748 (_("error: %B: 8-byte data alignment conflicts with %B"),
9749 obfd, ibfd);
9750 result = FALSE; */
9751 }
9752 /* Fall through. */
9753 case Tag_ABI_FP_denormal:
9754 case Tag_ABI_PCS_GOT_use:
9755 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9756 value if greater than 2 (for future-proofing). */
9757 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9758 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9759 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9760 out_attr[i].i = in_attr[i].i;
9761 break;
9762
9763
9764 case Tag_CPU_arch_profile:
9765 if (out_attr[i].i != in_attr[i].i)
9766 {
9767 /* 0 will merge with anything.
9768 'A' and 'S' merge to 'A'.
9769 'R' and 'S' merge to 'R'.
9770 'M' and 'A|R|S' is an error. */
9771 if (out_attr[i].i == 0
9772 || (out_attr[i].i == 'S'
9773 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9774 out_attr[i].i = in_attr[i].i;
9775 else if (in_attr[i].i == 0
9776 || (in_attr[i].i == 'S'
9777 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9778 ; /* Do nothing. */
9779 else
9780 {
9781 _bfd_error_handler
9782 (_("error: %B: Conflicting architecture profiles %c/%c"),
9783 ibfd,
9784 in_attr[i].i ? in_attr[i].i : '0',
9785 out_attr[i].i ? out_attr[i].i : '0');
9786 result = FALSE;
9787 }
9788 }
9789 break;
9790 case Tag_VFP_arch:
9791 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9792 largest value if greater than 4 (for future-proofing). */
9793 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9794 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9795 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9796 out_attr[i].i = in_attr[i].i;
9797 break;
9798 case Tag_PCS_config:
9799 if (out_attr[i].i == 0)
9800 out_attr[i].i = in_attr[i].i;
9801 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9802 {
9803 /* It's sometimes ok to mix different configs, so this is only
9804 a warning. */
9805 _bfd_error_handler
9806 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9807 }
9808 break;
9809 case Tag_ABI_PCS_R9_use:
9810 if (in_attr[i].i != out_attr[i].i
9811 && out_attr[i].i != AEABI_R9_unused
9812 && in_attr[i].i != AEABI_R9_unused)
9813 {
9814 _bfd_error_handler
9815 (_("error: %B: Conflicting use of R9"), ibfd);
9816 result = FALSE;
9817 }
9818 if (out_attr[i].i == AEABI_R9_unused)
9819 out_attr[i].i = in_attr[i].i;
9820 break;
9821 case Tag_ABI_PCS_RW_data:
9822 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9823 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9824 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9825 {
9826 _bfd_error_handler
9827 (_("error: %B: SB relative addressing conflicts with use of R9"),
9828 ibfd);
9829 result = FALSE;
9830 }
9831 /* Use the smallest value specified. */
9832 if (in_attr[i].i < out_attr[i].i)
9833 out_attr[i].i = in_attr[i].i;
9834 break;
9835 case Tag_ABI_PCS_wchar_t:
9836 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9837 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9838 {
9839 _bfd_error_handler
9840 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9841 ibfd, in_attr[i].i, out_attr[i].i);
9842 }
9843 else if (in_attr[i].i && !out_attr[i].i)
9844 out_attr[i].i = in_attr[i].i;
9845 break;
9846 case Tag_ABI_enum_size:
9847 if (in_attr[i].i != AEABI_enum_unused)
9848 {
9849 if (out_attr[i].i == AEABI_enum_unused
9850 || out_attr[i].i == AEABI_enum_forced_wide)
9851 {
9852 /* The existing object is compatible with anything.
9853 Use whatever requirements the new object has. */
9854 out_attr[i].i = in_attr[i].i;
9855 }
9856 else if (in_attr[i].i != AEABI_enum_forced_wide
9857 && out_attr[i].i != in_attr[i].i
9858 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9859 {
9860 static const char *aeabi_enum_names[] =
9861 { "", "variable-size", "32-bit", "" };
9862 const char *in_name =
9863 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9864 ? aeabi_enum_names[in_attr[i].i]
9865 : "<unknown>";
9866 const char *out_name =
9867 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9868 ? aeabi_enum_names[out_attr[i].i]
9869 : "<unknown>";
9870 _bfd_error_handler
9871 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9872 ibfd, in_name, out_name);
9873 }
9874 }
9875 break;
9876 case Tag_ABI_VFP_args:
9877 /* Aready done. */
9878 break;
9879 case Tag_ABI_WMMX_args:
9880 if (in_attr[i].i != out_attr[i].i)
9881 {
9882 _bfd_error_handler
9883 (_("error: %B uses iWMMXt register arguments, %B does not"),
9884 ibfd, obfd);
9885 result = FALSE;
9886 }
9887 break;
9888 case Tag_compatibility:
9889 /* Merged in target-independent code. */
9890 break;
9891 case Tag_ABI_HardFP_use:
9892 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9893 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9894 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9895 out_attr[i].i = 3;
9896 else if (in_attr[i].i > out_attr[i].i)
9897 out_attr[i].i = in_attr[i].i;
9898 break;
9899 case Tag_ABI_FP_16bit_format:
9900 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9901 {
9902 if (in_attr[i].i != out_attr[i].i)
9903 {
9904 _bfd_error_handler
9905 (_("error: fp16 format mismatch between %B and %B"),
9906 ibfd, obfd);
9907 result = FALSE;
9908 }
9909 }
9910 if (in_attr[i].i != 0)
9911 out_attr[i].i = in_attr[i].i;
9912 break;
9913
9914 case Tag_nodefaults:
9915 /* This tag is set if it exists, but the value is unused (and is
9916 typically zero). We don't actually need to do anything here -
9917 the merge happens automatically when the type flags are merged
9918 below. */
9919 break;
9920 case Tag_also_compatible_with:
9921 /* Already done in Tag_CPU_arch. */
9922 break;
9923 case Tag_conformance:
9924 /* Keep the attribute if it matches. Throw it away otherwise.
9925 No attribute means no claim to conform. */
9926 if (!in_attr[i].s || !out_attr[i].s
9927 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9928 out_attr[i].s = NULL;
9929 break;
9930
9931 default:
9932 {
9933 bfd *err_bfd = NULL;
9934
9935 /* The "known_obj_attributes" table does contain some undefined
9936 attributes. Ensure that there are unused. */
9937 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9938 err_bfd = obfd;
9939 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9940 err_bfd = ibfd;
9941
9942 if (err_bfd != NULL)
9943 {
9944 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9945 if ((i & 127) < 64)
9946 {
9947 _bfd_error_handler
9948 (_("%B: Unknown mandatory EABI object attribute %d"),
9949 err_bfd, i);
9950 bfd_set_error (bfd_error_bad_value);
9951 result = FALSE;
9952 }
9953 else
9954 {
9955 _bfd_error_handler
9956 (_("Warning: %B: Unknown EABI object attribute %d"),
9957 err_bfd, i);
9958 }
9959 }
9960
9961 /* Only pass on attributes that match in both inputs. */
9962 if (in_attr[i].i != out_attr[i].i
9963 || in_attr[i].s != out_attr[i].s
9964 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9965 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9966 {
9967 out_attr[i].i = 0;
9968 out_attr[i].s = NULL;
9969 }
9970 }
9971 }
9972
9973 /* If out_attr was copied from in_attr then it won't have a type yet. */
9974 if (in_attr[i].type && !out_attr[i].type)
9975 out_attr[i].type = in_attr[i].type;
9976 }
9977
9978 /* Merge Tag_compatibility attributes and any common GNU ones. */
9979 _bfd_elf_merge_object_attributes (ibfd, obfd);
9980
9981 /* Check for any attributes not known on ARM. */
9982 in_list = elf_other_obj_attributes_proc (ibfd);
9983 out_listp = &elf_other_obj_attributes_proc (obfd);
9984 out_list = *out_listp;
9985
9986 for (; in_list || out_list; )
9987 {
9988 bfd *err_bfd = NULL;
9989 int err_tag = 0;
9990
9991 /* The tags for each list are in numerical order. */
9992 /* If the tags are equal, then merge. */
9993 if (out_list && (!in_list || in_list->tag > out_list->tag))
9994 {
9995 /* This attribute only exists in obfd. We can't merge, and we don't
9996 know what the tag means, so delete it. */
9997 err_bfd = obfd;
9998 err_tag = out_list->tag;
9999 *out_listp = out_list->next;
10000 out_list = *out_listp;
10001 }
10002 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10003 {
10004 /* This attribute only exists in ibfd. We can't merge, and we don't
10005 know what the tag means, so ignore it. */
10006 err_bfd = ibfd;
10007 err_tag = in_list->tag;
10008 in_list = in_list->next;
10009 }
10010 else /* The tags are equal. */
10011 {
10012 /* As present, all attributes in the list are unknown, and
10013 therefore can't be merged meaningfully. */
10014 err_bfd = obfd;
10015 err_tag = out_list->tag;
10016
10017 /* Only pass on attributes that match in both inputs. */
10018 if (in_list->attr.i != out_list->attr.i
10019 || in_list->attr.s != out_list->attr.s
10020 || (in_list->attr.s && out_list->attr.s
10021 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10022 {
10023 /* No match. Delete the attribute. */
10024 *out_listp = out_list->next;
10025 out_list = *out_listp;
10026 }
10027 else
10028 {
10029 /* Matched. Keep the attribute and move to the next. */
10030 out_list = out_list->next;
10031 in_list = in_list->next;
10032 }
10033 }
10034
10035 if (err_bfd)
10036 {
10037 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10038 if ((err_tag & 127) < 64)
10039 {
10040 _bfd_error_handler
10041 (_("%B: Unknown mandatory EABI object attribute %d"),
10042 err_bfd, err_tag);
10043 bfd_set_error (bfd_error_bad_value);
10044 result = FALSE;
10045 }
10046 else
10047 {
10048 _bfd_error_handler
10049 (_("Warning: %B: Unknown EABI object attribute %d"),
10050 err_bfd, err_tag);
10051 }
10052 }
10053 }
10054 return result;
10055 }
10056
10057
10058 /* Return TRUE if the two EABI versions are incompatible. */
10059
10060 static bfd_boolean
10061 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10062 {
10063 /* v4 and v5 are the same spec before and after it was released,
10064 so allow mixing them. */
10065 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10066 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10067 return TRUE;
10068
10069 return (iver == over);
10070 }
10071
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (after reporting via
   _bfd_error_handler) if the two files are incompatible.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
{
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, obfd))
    return FALSE;

  /* Non-ARM inputs (e.g. generic or binary objects) have nothing to merge.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attribute sections first; they carry most of the
     per-object compatibility information for EABI objects.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %B is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      /* NOTE(review): only the first non-glue section is examined;
		 a later code section would not clear only_data_sections.
		 Confirm this is intentional.  */
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
	 ibfd, obfd,
	 (in_flags & EF_ARM_EABIMASK) >> 24,
	 (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* The remaining checks below only apply to legacy (pre-EABI)
	 GNU-extension flag bits.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
	     ibfd, obfd,
	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses VFP instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B uses FPA instructions, whereas %B does not"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %B uses Maverick instructions, whereas %B does not"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %B does not use Maverick instructions, whereas %B does"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("Warning: %B supports interworking, whereas %B does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("Warning: %B does not support interworking, whereas %B does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
10295
/* Display the flags field.  PTR is the stdio FILE to print to; decodes
   the ELF header e_flags according to the EABI version they declare.
   Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  /* xgettext:c-format */
  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case clears the bits it decodes from FLAGS so that any
     leftover bits can be reported as unrecognised at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* v4 and v5 share the BE8/LE8 bits; fall through via the label.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));
    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* These two bits are version-independent.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_HASENTRY)
    fprintf (file, _(" [has entry point]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
10429
10430 static int
10431 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10432 {
10433 switch (ELF_ST_TYPE (elf_sym->st_info))
10434 {
10435 case STT_ARM_TFUNC:
10436 return ELF_ST_TYPE (elf_sym->st_info);
10437
10438 case STT_ARM_16BIT:
10439 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10440 This allows us to distinguish between data used by Thumb instructions
10441 and non-data (which is probably code) inside Thumb regions of an
10442 executable. */
10443 if (type != STT_OBJECT && type != STT_TLS)
10444 return ELF_ST_TYPE (elf_sym->st_info);
10445 break;
10446
10447 default:
10448 break;
10449 }
10450
10451 return type;
10452 }
10453
10454 static asection *
10455 elf32_arm_gc_mark_hook (asection *sec,
10456 struct bfd_link_info *info,
10457 Elf_Internal_Rela *rel,
10458 struct elf_link_hash_entry *h,
10459 Elf_Internal_Sym *sym)
10460 {
10461 if (h != NULL)
10462 switch (ELF32_R_TYPE (rel->r_info))
10463 {
10464 case R_ARM_GNU_VTINHERIT:
10465 case R_ARM_GNU_VTENTRY:
10466 return NULL;
10467 }
10468
10469 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10470 }
10471
/* Update the GOT, PLT and copied-relocation reference counts for
   section SEC of ABFD, which is being discarded by garbage
   collection.  This undoes the counting done in
   elf32_arm_check_relocs.  */

static bfd_boolean
elf32_arm_gc_sweep_hook (bfd * abfd,
			 struct bfd_link_info * info,
			 asection * sec,
			 const Elf_Internal_Rela * relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  bfd_signed_vma *local_got_refcounts;
  const Elf_Internal_Rela *rel, *relend;
  struct elf32_arm_link_hash_table * globals;

  /* No reference counting is done for relocatable links.  */
  if (info->relocatable)
    return TRUE;

  globals = elf32_arm_hash_table (info);

  elf_section_data (sec)->local_dynrel = NULL;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  local_got_refcounts = elf_local_got_refcounts (abfd);

  check_use_blx (globals);

  /* Walk every relocation in SEC and drop the counts it contributed.  */
  relend = relocs + sec->reloc_count;
  for (rel = relocs; rel < relend; rel++)
    {
      unsigned long r_symndx;
      struct elf_link_hash_entry *h = NULL;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      if (r_symndx >= symtab_hdr->sh_info)
	{
	  /* Global symbol: resolve indirect and warning links to
	     reach the real hash table entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (globals, r_type);
      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* Release one GOT reference, global or local as appropriate.  */
	  if (h != NULL)
	    {
	      if (h->got.refcount > 0)
		h->got.refcount -= 1;
	    }
	  else if (local_got_refcounts != NULL)
	    {
	      if (local_got_refcounts[r_symndx] > 0)
		local_got_refcounts[r_symndx] -= 1;
	    }
	  break;

	case R_ARM_TLS_LDM32:
	  /* The local-dynamic GOT entry is shared; a single counter
	     lives in the hash table.  */
	  elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
	  break;

	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  /* Should the interworking branches be here also?  */

	  if (h != NULL)
	    {
	      struct elf32_arm_link_hash_entry *eh;
	      struct elf32_arm_relocs_copied **pp;
	      struct elf32_arm_relocs_copied *p;

	      eh = (struct elf32_arm_link_hash_entry *) h;

	      /* Release the PLT reference, including the extra Thumb
	         stub counters recorded by check_relocs.  */
	      if (h->plt.refcount > 0)
		{
		  h->plt.refcount -= 1;
		  if (r_type == R_ARM_THM_CALL)
		    eh->plt_maybe_thumb_refcount--;

		  if (r_type == R_ARM_THM_JUMP24
		      || r_type == R_ARM_THM_JUMP19)
		    eh->plt_thumb_refcount--;
		}

	      /* Drop the matching entry from the list of dynamic
	         relocations that would have been copied for SEC.  */
	      if (r_type == R_ARM_ABS32
		  || r_type == R_ARM_REL32
		  || r_type == R_ARM_ABS32_NOI
		  || r_type == R_ARM_REL32_NOI)
		{
		  for (pp = &eh->relocs_copied; (p = *pp) != NULL;
		       pp = &p->next)
		  if (p->section == sec)
		    {
		      p->count -= 1;
		      if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
			  || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
			p->pc_count -= 1;
		      /* Unlink the record once no relocs remain.  */
		      if (p->count == 0)
			*pp = p->next;
		      break;
		    }
		}
	    }
	  break;

	default:
	  break;
	}
    }

  return TRUE;
}
10608
/* Look through the relocs for a section during the first phase.
   Record GOT and PLT reference counts, note which dynamic relocations
   will have to be copied into the output, and create the sections
   that will be needed to hold them.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  bfd_vma *local_got_offsets;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean needs_plt;
  unsigned long nsyms;

  /* Nothing needs to be recorded for a relocatable link.  */
  if (info->relocatable)
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  dynobj = elf_hash_table (info)->dynobj;
  local_got_offsets = elf_local_got_offsets (abfd);

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned long r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > 0 || nsyms > 0))
	{
	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
				 r_symndx);
	  return FALSE;
	}

      if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
	h = NULL;
      else
	{
	  /* Global symbol: chase indirect and warning links to the
	     real hash table entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      switch (r_type)
	{
	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_IE32:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		bfd_signed_vma *local_got_refcounts;

		/* This is a global offset table entry for a local symbol.  */
		local_got_refcounts = elf_local_got_refcounts (abfd);
		if (local_got_refcounts == NULL)
		  {
		    bfd_size_type size;

		    /* Allocate the refcount array and, in the same
		       block, a parallel char array holding the TLS
		       type for each local symbol.  */
		    size = symtab_hdr->sh_info;
		    size *= (sizeof (bfd_signed_vma) + sizeof (char));
		    local_got_refcounts = bfd_zalloc (abfd, size);
		    if (local_got_refcounts == NULL)
		      return FALSE;
		    elf_local_got_refcounts (abfd) = local_got_refcounts;
		    elf32_arm_local_got_tls_type (abfd)
		      = (char *) (local_got_refcounts + symtab_hdr->sh_info);
		  }
		local_got_refcounts[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* We will already have issued an error message if there is a
	       TLS / non-TLS mismatch, based on the symbol type.  We don't
	       support any linker relaxations.  So just combine any TLS
	       types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	  if (r_type == R_ARM_TLS_LDM32)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocations need the .got section to
	     exist, even if no GOT entry is required.  */
	  if (htab->sgot == NULL)
	    {
	      if (htab->root.dynobj == NULL)
		htab->root.dynobj = abfd;
	      if (!create_got_section (htab->root.dynobj, info))
		return FALSE;
	    }
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    break;
	  /* Fall through.  */

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  needs_plt = 1;
	  goto normal_reloc;

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* Absolute MOVW/MOVT pairs cannot be resolved at run time,
	     so they are unusable in shared objects.  */
	  if (info->shared)
	    {
	      (*_bfd_error_handler)
		(_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:
	  needs_plt = 0;
	normal_reloc:

	  /* Should the interworking branches be listed here?  */
	  if (h != NULL)
	    {
	      /* If this reloc is in a read-only section, we might
		 need a copy reloc.  We can't check reliably at this
		 stage whether the section is read-only, as input
		 sections have not yet been mapped to output sections.
		 Tentatively set the flag for now, and correct in
		 adjust_dynamic_symbol.  */
	      if (!info->shared)
		h->non_got_ref = 1;

	      /* We may need a .plt entry if the function this reloc
		 refers to is in a different object.  We can't tell for
		 sure yet, because something later might force the
		 symbol local.  */
	      if (needs_plt)
		h->needs_plt = 1;

	      /* If we create a PLT entry, this relocation will reference
		 it, even if it's an ABS32 relocation.  */
	      h->plt.refcount += 1;

	      /* It's too early to use htab->use_blx here, so we have to
		 record possible blx references separately from
		 relocs that definitely need a thumb stub.  */

	      if (r_type == R_ARM_THM_CALL)
		eh->plt_maybe_thumb_refcount += 1;

	      if (r_type == R_ARM_THM_JUMP24
		  || r_type == R_ARM_THM_JUMP19)
		eh->plt_thumb_refcount += 1;
	    }

	  /* If we are creating a shared library or relocatable executable,
	     and this is a reloc against a global symbol, or a non PC
	     relative reloc against a local symbol, then we need to copy
	     the reloc into the shared library.  However, if we are linking
	     with -Bsymbolic, we do not need to copy a reloc against a
	     global symbol which is defined in an object we are
	     including in the link (i.e., DEF_REGULAR is set).  At
	     this point we have not seen all the input files, so it is
	     possible that DEF_REGULAR is not set now but will be set
	     later (it is never cleared).  We account for that
	     possibility below by storing information in the
	     relocs_copied field of the hash table entry.  */
	  if ((info->shared || htab->root.is_relocatable_executable)
	      && (sec->flags & SEC_ALLOC) != 0
	      && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
		  || (h != NULL && ! h->needs_plt
		      && (! info->symbolic || ! h->def_regular))))
	    {
	      struct elf32_arm_relocs_copied *p, **head;

	      /* When creating a shared object, we must copy these
		 reloc types into the output file.  We create a reloc
		 section in dynobj and make room for this reloc.  */
	      if (sreloc == NULL)
		{
		  sreloc = _bfd_elf_make_dynamic_reloc_section
		    (sec, dynobj, 2, abfd, ! htab->use_rel);

		  if (sreloc == NULL)
		    return FALSE;

		  /* BPABI objects never have dynamic relocations mapped.  */
		  if (htab->symbian_p)
		    {
		      flagword flags;

		      flags = bfd_get_section_flags (dynobj, sreloc);
		      flags &= ~(SEC_LOAD | SEC_ALLOC);
		      bfd_set_section_flags (dynobj, sreloc, flags);
		    }
		}

	      /* If this is a global symbol, we count the number of
		 relocations we need for this symbol.  */
	      if (h != NULL)
		{
		  head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
		}
	      else
		{
		  /* Track dynamic relocs needed for local syms too.
		     We really need local syms available to do this
		     easily.  Oh well.  */
		  asection *s;
		  void *vpp;
		  Elf_Internal_Sym *isym;

		  isym = bfd_sym_from_r_symndx (&htab->sym_cache,
						abfd, r_symndx);
		  if (isym == NULL)
		    return FALSE;

		  s = bfd_section_from_elf_index (abfd, isym->st_shndx);
		  if (s == NULL)
		    s = sec;

		  vpp = &elf_section_data (s)->local_dynrel;
		  head = (struct elf32_arm_relocs_copied **) vpp;
		}

	      /* The list is kept most-recent-section first, so the
		 head entry is reused while we stay in SEC.  */
	      p = *head;
	      if (p == NULL || p->section != sec)
		{
		  bfd_size_type amt = sizeof *p;

		  p = bfd_alloc (htab->root.dynobj, amt);
		  if (p == NULL)
		    return FALSE;
		  p->next = *head;
		  *head = p;
		  p->section = sec;
		  p->count = 0;
		  p->pc_count = 0;
		}

	      if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
		p->pc_count += 1;
	      p->count += 1;
	    }
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  BFD_ASSERT (h != NULL);
	  if (h != NULL
	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}
    }

  return TRUE;
}
10954
10955 /* Unwinding tables are not referenced directly. This pass marks them as
10956 required if the corresponding code section is marked. */
10957
10958 static bfd_boolean
10959 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10960 elf_gc_mark_hook_fn gc_mark_hook)
10961 {
10962 bfd *sub;
10963 Elf_Internal_Shdr **elf_shdrp;
10964 bfd_boolean again;
10965
10966 /* Marking EH data may cause additional code sections to be marked,
10967 requiring multiple passes. */
10968 again = TRUE;
10969 while (again)
10970 {
10971 again = FALSE;
10972 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10973 {
10974 asection *o;
10975
10976 if (! is_arm_elf (sub))
10977 continue;
10978
10979 elf_shdrp = elf_elfsections (sub);
10980 for (o = sub->sections; o != NULL; o = o->next)
10981 {
10982 Elf_Internal_Shdr *hdr;
10983
10984 hdr = &elf_section_data (o)->this_hdr;
10985 if (hdr->sh_type == SHT_ARM_EXIDX
10986 && hdr->sh_link
10987 && hdr->sh_link < elf_numsections (sub)
10988 && !o->gc_mark
10989 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10990 {
10991 again = TRUE;
10992 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10993 return FALSE;
10994 }
10995 }
10996 }
10997 }
10998
10999 return TRUE;
11000 }
11001
11002 /* Treat mapping symbols as special target symbols. */
11003
11004 static bfd_boolean
11005 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11006 {
11007 return bfd_is_arm_special_symbol_name (sym->name,
11008 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11009 }
11010
/* This is a copy of elf_find_function() from elf.c except that
   ARM mapping symbols are ignored when looking for function names
   and STT_ARM_TFUNC is considered to a function type.
   Finds the function symbol in SECTION that covers OFFSET, storing
   its name in *FUNCTIONNAME_PTR and the most recently seen STT_FILE
   name in *FILENAME_PTR.  Returns FALSE if no function is found.  */

static bfd_boolean
arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
		       asection * section,
		       asymbol ** symbols,
		       bfd_vma offset,
		       const char ** filename_ptr,
		       const char ** functionname_ptr)
{
  const char * filename = NULL;
  asymbol * func = NULL;
  bfd_vma low_func = 0;
  asymbol ** p;

  for (p = symbols; *p != NULL; p++)
    {
      elf_symbol_type *q;

      q = (elf_symbol_type *) *p;

      switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
	{
	default:
	  break;
	case STT_FILE:
	  /* Remember the most recent file symbol; it names the source
	     file of the symbols that follow it.  */
	  filename = bfd_asymbol_name (&q->symbol);
	  break;
	case STT_FUNC:
	case STT_ARM_TFUNC:
	case STT_NOTYPE:
	  /* Skip mapping symbols.  */
	  if ((q->symbol.flags & BSF_LOCAL)
	      && bfd_is_arm_special_symbol_name (q->symbol.name,
		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    continue;
	  /* Track the closest function symbol at or before OFFSET.  */
	  if (bfd_get_section (&q->symbol) == section
	      && q->symbol.value >= low_func
	      && q->symbol.value <= offset)
	    {
	      func = (asymbol *) q;
	      low_func = q->symbol.value;
	    }
	  break;
	}
    }

  if (func == NULL)
    return FALSE;

  if (filename_ptr)
    *filename_ptr = filename;
  if (functionname_ptr)
    *functionname_ptr = bfd_asymbol_name (func);

  return TRUE;
}
11071
11072
/* Find the nearest line to a particular section and offset, for error
   reporting.   This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  Lookup order: DWARF2 first,
   then stabs, then a plain symbol-table scan.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd *          abfd,
			     asection *     section,
			     asymbol **     symbols,
			     bfd_vma        offset,
			     const char **  filename_ptr,
			     const char **  functionname_ptr,
			     unsigned int * line_ptr)
{
  bfd_boolean found = FALSE;

  /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it.  */

  if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may find a line but not a function name; fill that in
	 from the symbol table without clobbering the filename.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, section, symbols, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: just locate the containing function symbol; no line
     number information is available in this case.  */
  if (! arm_elf_find_function (abfd, section, symbols, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
11122
11123 static bfd_boolean
11124 elf32_arm_find_inliner_info (bfd * abfd,
11125 const char ** filename_ptr,
11126 const char ** functionname_ptr,
11127 unsigned int * line_ptr)
11128 {
11129 bfd_boolean found;
11130 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11131 functionname_ptr, line_ptr,
11132 & elf_tdata (abfd)->dwarf2_find_line_info);
11133 return found;
11134 }
11135
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection * s;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->u.weakdef != NULL
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
      || h->needs_plt)
    {
      if (h->plt.refcount <= 0
	  || SYMBOL_CALLS_LOCAL (info, h)
	  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      && h->root.type == bfd_link_hash_undefweak))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt_thumb_refcount = 0;
	  eh->plt_maybe_thumb_refcount = 0;
	  h->needs_plt = 0;
	}

      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt_thumb_refcount = 0;
      eh->plt_maybe_thumb_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->u.weakdef != NULL)
    {
      BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
      h->root.u.def.section = h->u.weakdef->root.u.def.section;
      h->root.u.def.value = h->u.weakdef->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (info->shared || globals->root.is_relocatable_executable)
    return TRUE;

  /* A zero-size dynamic variable cannot be copied sensibly; warn and
     carry on rather than failing the link.  */
  if (h->size == 0)
    {
      (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
			     h->root.root.string);
      return TRUE;
    }

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  s = bfd_get_section_by_name (dynobj, ".dynbss");
  BFD_ASSERT (s != NULL);

  /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
     copy the initial value out of the dynamic object and into the
     runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
    {
      asection *srel;

      srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
      BFD_ASSERT (srel != NULL);
      srel->size += RELOC_SIZE (globals);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (h, s);
}
11264
11265 /* Allocate space in .plt, .got and associated reloc sections for
11266 dynamic relocs. */
11267
11268 static bfd_boolean
11269 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11270 {
11271 struct bfd_link_info *info;
11272 struct elf32_arm_link_hash_table *htab;
11273 struct elf32_arm_link_hash_entry *eh;
11274 struct elf32_arm_relocs_copied *p;
11275 bfd_signed_vma thumb_refs;
11276
11277 eh = (struct elf32_arm_link_hash_entry *) h;
11278
11279 if (h->root.type == bfd_link_hash_indirect)
11280 return TRUE;
11281
11282 if (h->root.type == bfd_link_hash_warning)
11283 /* When warning symbols are created, they **replace** the "real"
11284 entry in the hash table, thus we never get to see the real
11285 symbol in a hash traversal. So look at it now. */
11286 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11287
11288 info = (struct bfd_link_info *) inf;
11289 htab = elf32_arm_hash_table (info);
11290
11291 if (htab->root.dynamic_sections_created
11292 && h->plt.refcount > 0)
11293 {
11294 /* Make sure this symbol is output as a dynamic symbol.
11295 Undefined weak syms won't yet be marked as dynamic. */
11296 if (h->dynindx == -1
11297 && !h->forced_local)
11298 {
11299 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11300 return FALSE;
11301 }
11302
11303 if (info->shared
11304 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11305 {
11306 asection *s = htab->splt;
11307
11308 /* If this is the first .plt entry, make room for the special
11309 first entry. */
11310 if (s->size == 0)
11311 s->size += htab->plt_header_size;
11312
11313 h->plt.offset = s->size;
11314
11315 /* If we will insert a Thumb trampoline before this PLT, leave room
11316 for it. */
11317 thumb_refs = eh->plt_thumb_refcount;
11318 if (!htab->use_blx)
11319 thumb_refs += eh->plt_maybe_thumb_refcount;
11320
11321 if (thumb_refs > 0)
11322 {
11323 h->plt.offset += PLT_THUMB_STUB_SIZE;
11324 s->size += PLT_THUMB_STUB_SIZE;
11325 }
11326
11327 /* If this symbol is not defined in a regular file, and we are
11328 not generating a shared library, then set the symbol to this
11329 location in the .plt. This is required to make function
11330 pointers compare as equal between the normal executable and
11331 the shared library. */
11332 if (! info->shared
11333 && !h->def_regular)
11334 {
11335 h->root.u.def.section = s;
11336 h->root.u.def.value = h->plt.offset;
11337 }
11338
11339 /* Make sure the function is not marked as Thumb, in case
11340 it is the target of an ABS32 relocation, which will
11341 point to the PLT entry. */
11342 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11343 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11344
11345 /* Make room for this entry. */
11346 s->size += htab->plt_entry_size;
11347
11348 if (!htab->symbian_p)
11349 {
11350 /* We also need to make an entry in the .got.plt section, which
11351 will be placed in the .got section by the linker script. */
11352 eh->plt_got_offset = htab->sgotplt->size;
11353 htab->sgotplt->size += 4;
11354 }
11355
11356 /* We also need to make an entry in the .rel(a).plt section. */
11357 htab->srelplt->size += RELOC_SIZE (htab);
11358
11359 /* VxWorks executables have a second set of relocations for
11360 each PLT entry. They go in a separate relocation section,
11361 which is processed by the kernel loader. */
11362 if (htab->vxworks_p && !info->shared)
11363 {
11364 /* There is a relocation for the initial PLT entry:
11365 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11366 if (h->plt.offset == htab->plt_header_size)
11367 htab->srelplt2->size += RELOC_SIZE (htab);
11368
11369 /* There are two extra relocations for each subsequent
11370 PLT entry: an R_ARM_32 relocation for the GOT entry,
11371 and an R_ARM_32 relocation for the PLT entry. */
11372 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11373 }
11374 }
11375 else
11376 {
11377 h->plt.offset = (bfd_vma) -1;
11378 h->needs_plt = 0;
11379 }
11380 }
11381 else
11382 {
11383 h->plt.offset = (bfd_vma) -1;
11384 h->needs_plt = 0;
11385 }
11386
11387 if (h->got.refcount > 0)
11388 {
11389 asection *s;
11390 bfd_boolean dyn;
11391 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11392 int indx;
11393
11394 /* Make sure this symbol is output as a dynamic symbol.
11395 Undefined weak syms won't yet be marked as dynamic. */
11396 if (h->dynindx == -1
11397 && !h->forced_local)
11398 {
11399 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11400 return FALSE;
11401 }
11402
11403 if (!htab->symbian_p)
11404 {
11405 s = htab->sgot;
11406 h->got.offset = s->size;
11407
11408 if (tls_type == GOT_UNKNOWN)
11409 abort ();
11410
11411 if (tls_type == GOT_NORMAL)
11412 /* Non-TLS symbols need one GOT slot. */
11413 s->size += 4;
11414 else
11415 {
11416 if (tls_type & GOT_TLS_GD)
11417 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11418 s->size += 8;
11419 if (tls_type & GOT_TLS_IE)
11420 /* R_ARM_TLS_IE32 needs one GOT slot. */
11421 s->size += 4;
11422 }
11423
11424 dyn = htab->root.dynamic_sections_created;
11425
11426 indx = 0;
11427 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11428 && (!info->shared
11429 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11430 indx = h->dynindx;
11431
11432 if (tls_type != GOT_NORMAL
11433 && (info->shared || indx != 0)
11434 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11435 || h->root.type != bfd_link_hash_undefweak))
11436 {
11437 if (tls_type & GOT_TLS_IE)
11438 htab->srelgot->size += RELOC_SIZE (htab);
11439
11440 if (tls_type & GOT_TLS_GD)
11441 htab->srelgot->size += RELOC_SIZE (htab);
11442
11443 if ((tls_type & GOT_TLS_GD) && indx != 0)
11444 htab->srelgot->size += RELOC_SIZE (htab);
11445 }
11446 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11447 || h->root.type != bfd_link_hash_undefweak)
11448 && (info->shared
11449 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11450 htab->srelgot->size += RELOC_SIZE (htab);
11451 }
11452 }
11453 else
11454 h->got.offset = (bfd_vma) -1;
11455
11456 /* Allocate stubs for exported Thumb functions on v4t. */
11457 if (!htab->use_blx && h->dynindx != -1
11458 && h->def_regular
11459 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11460 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11461 {
11462 struct elf_link_hash_entry * th;
11463 struct bfd_link_hash_entry * bh;
11464 struct elf_link_hash_entry * myh;
11465 char name[1024];
11466 asection *s;
11467 bh = NULL;
11468 /* Create a new symbol to regist the real location of the function. */
11469 s = h->root.u.def.section;
11470 sprintf (name, "__real_%s", h->root.root.string);
11471 _bfd_generic_link_add_one_symbol (info, s->owner,
11472 name, BSF_GLOBAL, s,
11473 h->root.u.def.value,
11474 NULL, TRUE, FALSE, &bh);
11475
11476 myh = (struct elf_link_hash_entry *) bh;
11477 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11478 myh->forced_local = 1;
11479 eh->export_glue = myh;
11480 th = record_arm_to_thumb_glue (info, h);
11481 /* Point the symbol at the stub. */
11482 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11483 h->root.u.def.section = th->root.u.def.section;
11484 h->root.u.def.value = th->root.u.def.value & ~1;
11485 }
11486
11487 if (eh->relocs_copied == NULL)
11488 return TRUE;
11489
11490 /* In the shared -Bsymbolic case, discard space allocated for
11491 dynamic pc-relative relocs against symbols which turn out to be
11492 defined in regular objects. For the normal shared case, discard
11493 space for pc-relative relocs that have become local due to symbol
11494 visibility changes. */
11495
11496 if (info->shared || htab->root.is_relocatable_executable)
11497 {
11498 /* The only relocs that use pc_count are R_ARM_REL32 and
11499 R_ARM_REL32_NOI, which will appear on something like
11500 ".long foo - .". We want calls to protected symbols to resolve
11501 directly to the function rather than going via the plt. If people
11502 want function pointer comparisons to work as expected then they
11503 should avoid writing assembly like ".long foo - .". */
11504 if (SYMBOL_CALLS_LOCAL (info, h))
11505 {
11506 struct elf32_arm_relocs_copied **pp;
11507
11508 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11509 {
11510 p->count -= p->pc_count;
11511 p->pc_count = 0;
11512 if (p->count == 0)
11513 *pp = p->next;
11514 else
11515 pp = &p->next;
11516 }
11517 }
11518
11519 if (elf32_arm_hash_table (info)->vxworks_p)
11520 {
11521 struct elf32_arm_relocs_copied **pp;
11522
11523 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11524 {
11525 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11526 *pp = p->next;
11527 else
11528 pp = &p->next;
11529 }
11530 }
11531
11532 /* Also discard relocs on undefined weak syms with non-default
11533 visibility. */
11534 if (eh->relocs_copied != NULL
11535 && h->root.type == bfd_link_hash_undefweak)
11536 {
11537 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11538 eh->relocs_copied = NULL;
11539
11540 /* Make sure undefined weak symbols are output as a dynamic
11541 symbol in PIEs. */
11542 else if (h->dynindx == -1
11543 && !h->forced_local)
11544 {
11545 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11546 return FALSE;
11547 }
11548 }
11549
11550 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11551 && h->root.type == bfd_link_hash_new)
11552 {
11553 /* Output absolute symbols so that we can create relocations
11554 against them. For normal symbols we output a relocation
11555 against the section that contains them. */
11556 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11557 return FALSE;
11558 }
11559
11560 }
11561 else
11562 {
11563 /* For the non-shared case, discard space for relocs against
11564 symbols which turn out to need copy relocs or are not
11565 dynamic. */
11566
11567 if (!h->non_got_ref
11568 && ((h->def_dynamic
11569 && !h->def_regular)
11570 || (htab->root.dynamic_sections_created
11571 && (h->root.type == bfd_link_hash_undefweak
11572 || h->root.type == bfd_link_hash_undefined))))
11573 {
11574 /* Make sure this symbol is output as a dynamic symbol.
11575 Undefined weak syms won't yet be marked as dynamic. */
11576 if (h->dynindx == -1
11577 && !h->forced_local)
11578 {
11579 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11580 return FALSE;
11581 }
11582
11583 /* If that succeeded, we know we'll be keeping all the
11584 relocs. */
11585 if (h->dynindx != -1)
11586 goto keep;
11587 }
11588
11589 eh->relocs_copied = NULL;
11590
11591 keep: ;
11592 }
11593
11594 /* Finally, allocate space. */
11595 for (p = eh->relocs_copied; p != NULL; p = p->next)
11596 {
11597 asection *sreloc = elf_section_data (p->section)->sreloc;
11598 sreloc->size += p->count * RELOC_SIZE (htab);
11599 }
11600
11601 return TRUE;
11602 }
11603
11604 /* Find any dynamic relocs that apply to read-only sections. */
11605
11606 static bfd_boolean
11607 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11608 {
11609 struct elf32_arm_link_hash_entry * eh;
11610 struct elf32_arm_relocs_copied * p;
11611
11612 if (h->root.type == bfd_link_hash_warning)
11613 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11614
11615 eh = (struct elf32_arm_link_hash_entry *) h;
11616 for (p = eh->relocs_copied; p != NULL; p = p->next)
11617 {
11618 asection *s = p->section;
11619
11620 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11621 {
11622 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11623
11624 info->flags |= DF_TEXTREL;
11625
11626 /* Not an error, just cut short the traversal. */
11627 return FALSE;
11628 }
11629 }
11630 return TRUE;
11631 }
11632
11633 void
11634 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11635 int byteswap_code)
11636 {
11637 struct elf32_arm_link_hash_table *globals;
11638
11639 globals = elf32_arm_hash_table (info);
11640 globals->byteswap_code = byteswap_code;
11641 }
11642
11643 /* Set the sizes of the dynamic sections. */
11644
static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;		/* Does the output need a PLT?  */
  bfd_boolean relocs;		/* Any reloc sections besides .rel(a).plt?  */
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  /* Decide whether BLX may be used before we size PLT entries.  */
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (info->executable)
	{
	  s = bfd_get_section_by_name (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      char *local_tls_type;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;

      /* Skip input files that are not ARM ELF.  */
      if (! is_arm_elf (ibfd))
	continue;

      /* First, reserve space for the dynamic relocs recorded against
	 local symbols in each input section.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf32_arm_relocs_copied *p;

	  for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->section)
		  && bfd_is_abs_section (p->section->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->section->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->section)->sreloc;
		  srel->size += p->count * RELOC_SIZE (htab);
		  /* Relocs against read-only sections force DT_TEXTREL.  */
		  if ((p->section->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      /* Next, assign GOT offsets to local symbols with GOT references.  */
      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      s = htab->sgot;
      srel = htab->srelgot;
      for (; local_got < end_local_got; ++local_got, ++local_tls_type)
	{
	  if (*local_got > 0)
	    {
	      /* Refcount is positive: convert it into a GOT offset.  */
	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;
	      if (*local_tls_type == GOT_NORMAL)
		s->size += 4;

	      /* Shared links (and TLS_GD even when static) need a
		 dynamic reloc for the GOT entry.  */
	      if (info->shared || *local_tls_type == GOT_TLS_GD)
		srel->size += RELOC_SIZE (htab);
	    }
	  else
	    /* Unreferenced: mark the offset as unused.  */
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32 relocations.  */
      htab->tls_ldm_got.offset = htab->sgot->size;
      htab->sgot->size += 8;
      if (info->shared)
	htab->srelgot->size += RELOC_SIZE (htab);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* Size interworking glue and scan for the VFP11 denorm erratum;
	 failures are reported but do not abort the link here.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
	/* xgettext:c-format */
	_bfd_error_handler (_("Errors encountered processing file %s"),
			    ibfd->filename);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (strcmp (name, ".plt") == 0)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (! CONST_STRNEQ (name, ".got")
	       && strcmp (name, ".dynbss") != 0)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (info->executable)
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  /* PLT-related tags; DT_PLTREL records whether this target
	     uses REL or RELA entries in .rel(a).plt.  */
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
				info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
11914
11915 /* Finish up dynamic symbol handling. We set the contents of various
11916 dynamic sections here. */
11917
static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  bfd * dynobj;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  dynobj = elf_hash_table (info)->dynobj;
  htab = elf32_arm_hash_table (info);
  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      asection * splt;
      asection * srel;
      bfd_byte *loc;
      bfd_vma plt_index;
      Elf_Internal_Rela rel;

      /* This symbol has an entry in the procedure linkage table.  Set
	 it up.  */

      BFD_ASSERT (h->dynindx != -1);

      splt = bfd_get_section_by_name (dynobj, ".plt");
      srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
      BFD_ASSERT (splt != NULL && srel != NULL);

      /* Fill in the entry in the procedure linkage table.  */
      if (htab->symbian_p)
	{
	  /* Symbian PLT entries are two words; the second word is
	     patched by a GLOB_DAT reloc below.  */
	  put_arm_insn (htab, output_bfd,
			elf32_arm_symbian_plt_entry[0],
			splt->contents + h->plt.offset);
	  bfd_put_32 (output_bfd,
		      elf32_arm_symbian_plt_entry[1],
		      splt->contents + h->plt.offset + 4);

	  /* Fill in the entry in the .rel.plt section.  */
	  rel.r_offset = (splt->output_section->vma
			  + splt->output_offset
			  + h->plt.offset + 4);
	  rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);

	  /* Get the index in the procedure linkage table which
	     corresponds to this symbol.  This is the index of this symbol
	     in all the symbols for which we are making plt entries.  The
	     first entry in the procedure linkage table is reserved.  */
	  plt_index = ((h->plt.offset - htab->plt_header_size)
		       / htab->plt_entry_size);
	}
      else
	{
	  bfd_vma got_offset, got_address, plt_address;
	  bfd_vma got_displacement;
	  asection * sgot;
	  bfd_byte * ptr;

	  sgot = bfd_get_section_by_name (dynobj, ".got.plt");
	  BFD_ASSERT (sgot != NULL);

	  /* Get the offset into the .got.plt table of the entry that
	     corresponds to this function.  */
	  got_offset = eh->plt_got_offset;

	  /* Get the index in the procedure linkage table which
	     corresponds to this symbol.  This is the index of this symbol
	     in all the symbols for which we are making plt entries.  The
	     first three entries in .got.plt are reserved; after that
	     symbols appear in the same order as in .plt.  */
	  plt_index = (got_offset - 12) / 4;

	  /* Calculate the address of the GOT entry.  */
	  got_address = (sgot->output_section->vma
			 + sgot->output_offset
			 + got_offset);

	  /* ...and the address of the PLT entry.  */
	  plt_address = (splt->output_section->vma
			 + splt->output_offset
			 + h->plt.offset);

	  /* PTR points at this symbol's slot in the PLT image.  */
	  ptr = htab->splt->contents + h->plt.offset;
	  if (htab->vxworks_p && info->shared)
	    {
	      unsigned int i;
	      bfd_vma val;

	      /* Emit the VxWorks shared-library PLT entry, patching
		 the GOT offset into word 2 and the reloc offset into
		 word 5 of the template.  */
	      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
		{
		  val = elf32_arm_vxworks_shared_plt_entry[i];
		  if (i == 2)
		    val |= got_address - sgot->output_section->vma;
		  if (i == 5)
		    val |= plt_index * RELOC_SIZE (htab);
		  if (i == 2 || i == 5)
		    bfd_put_32 (output_bfd, val, ptr);
		  else
		    put_arm_insn (htab, output_bfd, val, ptr);
		}
	    }
	  else if (htab->vxworks_p)
	    {
	      unsigned int i;
	      bfd_vma val;

	      /* Emit the VxWorks executable PLT entry; word 4 holds a
		 branch back to the start of the PLT.  */
	      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
		{
		  val = elf32_arm_vxworks_exec_plt_entry[i];
		  if (i == 2)
		    val |= got_address;
		  if (i == 4)
		    val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
		  if (i == 5)
		    val |= plt_index * RELOC_SIZE (htab);
		  if (i == 2 || i == 5)
		    bfd_put_32 (output_bfd, val, ptr);
		  else
		    put_arm_insn (htab, output_bfd, val, ptr);
		}

	      /* The two .rela.plt.unloaded relocs for this entry start
		 one reloc past the header's reloc.  */
	      loc = (htab->srelplt2->contents
		     + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
		 referencing the GOT for this PLT entry.  */
	      rel.r_offset = plt_address + 8;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = got_offset;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	      loc += RELOC_SIZE (htab);

	      /* Create the R_ARM_ABS32 relocation referencing the
		 beginning of the PLT for this GOT entry.  */
	      rel.r_offset = got_address;
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	    }
	  else
	    {
	      bfd_signed_vma thumb_refs;
	      /* Calculate the displacement between the PLT slot and the
		 entry in the GOT.  The eight-byte offset accounts for the
		 value produced by adding to pc in the first instruction
		 of the PLT stub.  */
	      got_displacement = got_address - (plt_address + 8);

	      /* The displacement must fit in the PLT entry's immediate
		 fields (split across three instructions below).  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      thumb_refs = eh->plt_thumb_refcount;
	      if (!htab->use_blx)
		thumb_refs += eh->plt_maybe_thumb_refcount;

	      /* Thumb callers without BLX need a 4-byte Thumb stub
		 immediately before the ARM PLT entry.  */
	      if (thumb_refs > 0)
		{
		  put_thumb_insn (htab, output_bfd,
				  elf32_arm_plt_thumb_stub[0], ptr - 4);
		  put_thumb_insn (htab, output_bfd,
				  elf32_arm_plt_thumb_stub[1], ptr - 2);
		}

	      /* Write the three-instruction PLT entry, distributing
		 the GOT displacement across the instructions'
		 immediate fields.  */
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
#endif
	    }

	  /* Fill in the entry in the global offset table.  */
	  bfd_put_32 (output_bfd,
		      (splt->output_section->vma
		       + splt->output_offset),
		      sgot->contents + got_offset);

	  /* Fill in the entry in the .rel(a).plt section.  */
	  rel.r_addend = 0;
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
	}

      /* Emit the reloc prepared by whichever branch ran above.  */
      loc = srel->contents + plt_index * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  Leave the value alone.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak, we do need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  */
	  if (!h->ref_regular_nonweak)
	    sym->st_value = 0;
	}
    }

  /* TLS_GD/TLS_IE GOT entries are handled elsewhere; only plain GOT
     entries are finished here.  */
  if (h->got.offset != (bfd_vma) -1
      && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
      && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
    {
      asection * sgot;
      asection * srel;
      Elf_Internal_Rela rel;
      bfd_byte *loc;
      bfd_vma offset;

      /* This symbol has an entry in the global offset table.  Set it
	 up.  */
      sgot = bfd_get_section_by_name (dynobj, ".got");
      srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
      BFD_ASSERT (sgot != NULL && srel != NULL);

      /* Bit 0 of got.offset is a "reloc emitted" flag; mask it off to
	 get the real offset.  */
      offset = (h->got.offset & ~(bfd_vma) 1);
      rel.r_addend = 0;
      rel.r_offset = (sgot->output_section->vma
		      + sgot->output_offset
		      + offset);

      /* If this is a static link, or it is a -Bsymbolic link and the
	 symbol is defined locally or was forced to be local because
	 of a version file, we just want to emit a RELATIVE reloc.
	 The entry in the global offset table will already have been
	 initialized in the relocate_section function.  */
      if (info->shared
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  BFD_ASSERT ((h->got.offset & 1) != 0);
	  rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	  if (!htab->use_rel)
	    {
	      /* RELA: move the in-place value into the addend and
		 zero the GOT slot.  */
	      rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
	      bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
	    }
	}
      else
	{
	  BFD_ASSERT ((h->got.offset & 1) == 0);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
	  rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
	}

      loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;
      bfd_byte *loc;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      s = bfd_get_section_by_name (h->root.u.def.section->owner,
				   RELOC_SECTION (htab, ".bss"));
      BFD_ASSERT (s != NULL);

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
     to the ".got" section.  */
  if (strcmp (h->root.root.string, "_DYNAMIC") == 0
      || (!htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
12211
12212 /* Finish up the dynamic sections. */
12213
12214 static bfd_boolean
12215 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12216 {
12217 bfd * dynobj;
12218 asection * sgot;
12219 asection * sdyn;
12220
12221 dynobj = elf_hash_table (info)->dynobj;
12222
12223 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12224 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12225 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12226
12227 if (elf_hash_table (info)->dynamic_sections_created)
12228 {
12229 asection *splt;
12230 Elf32_External_Dyn *dyncon, *dynconend;
12231 struct elf32_arm_link_hash_table *htab;
12232
12233 htab = elf32_arm_hash_table (info);
12234 splt = bfd_get_section_by_name (dynobj, ".plt");
12235 BFD_ASSERT (splt != NULL && sdyn != NULL);
12236
12237 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12238 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12239
12240 for (; dyncon < dynconend; dyncon++)
12241 {
12242 Elf_Internal_Dyn dyn;
12243 const char * name;
12244 asection * s;
12245
12246 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12247
12248 switch (dyn.d_tag)
12249 {
12250 unsigned int type;
12251
12252 default:
12253 if (htab->vxworks_p
12254 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12255 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12256 break;
12257
12258 case DT_HASH:
12259 name = ".hash";
12260 goto get_vma_if_bpabi;
12261 case DT_STRTAB:
12262 name = ".dynstr";
12263 goto get_vma_if_bpabi;
12264 case DT_SYMTAB:
12265 name = ".dynsym";
12266 goto get_vma_if_bpabi;
12267 case DT_VERSYM:
12268 name = ".gnu.version";
12269 goto get_vma_if_bpabi;
12270 case DT_VERDEF:
12271 name = ".gnu.version_d";
12272 goto get_vma_if_bpabi;
12273 case DT_VERNEED:
12274 name = ".gnu.version_r";
12275 goto get_vma_if_bpabi;
12276
12277 case DT_PLTGOT:
12278 name = ".got";
12279 goto get_vma;
12280 case DT_JMPREL:
12281 name = RELOC_SECTION (htab, ".plt");
12282 get_vma:
12283 s = bfd_get_section_by_name (output_bfd, name);
12284 BFD_ASSERT (s != NULL);
12285 if (!htab->symbian_p)
12286 dyn.d_un.d_ptr = s->vma;
12287 else
12288 /* In the BPABI, tags in the PT_DYNAMIC section point
12289 at the file offset, not the memory address, for the
12290 convenience of the post linker. */
12291 dyn.d_un.d_ptr = s->filepos;
12292 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12293 break;
12294
12295 get_vma_if_bpabi:
12296 if (htab->symbian_p)
12297 goto get_vma;
12298 break;
12299
12300 case DT_PLTRELSZ:
12301 s = bfd_get_section_by_name (output_bfd,
12302 RELOC_SECTION (htab, ".plt"));
12303 BFD_ASSERT (s != NULL);
12304 dyn.d_un.d_val = s->size;
12305 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12306 break;
12307
12308 case DT_RELSZ:
12309 case DT_RELASZ:
12310 if (!htab->symbian_p)
12311 {
12312 /* My reading of the SVR4 ABI indicates that the
12313 procedure linkage table relocs (DT_JMPREL) should be
12314 included in the overall relocs (DT_REL). This is
12315 what Solaris does. However, UnixWare can not handle
12316 that case. Therefore, we override the DT_RELSZ entry
12317 here to make it not include the JMPREL relocs. Since
12318 the linker script arranges for .rel(a).plt to follow all
12319 other relocation sections, we don't have to worry
12320 about changing the DT_REL entry. */
12321 s = bfd_get_section_by_name (output_bfd,
12322 RELOC_SECTION (htab, ".plt"));
12323 if (s != NULL)
12324 dyn.d_un.d_val -= s->size;
12325 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12326 break;
12327 }
12328 /* Fall through. */
12329
12330 case DT_REL:
12331 case DT_RELA:
12332 /* In the BPABI, the DT_REL tag must point at the file
12333 offset, not the VMA, of the first relocation
12334 section. So, we use code similar to that in
12335 elflink.c, but do not check for SHF_ALLOC on the
12336 relcoation section, since relocations sections are
12337 never allocated under the BPABI. The comments above
12338 about Unixware notwithstanding, we include all of the
12339 relocations here. */
12340 if (htab->symbian_p)
12341 {
12342 unsigned int i;
12343 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12344 ? SHT_REL : SHT_RELA);
12345 dyn.d_un.d_val = 0;
12346 for (i = 1; i < elf_numsections (output_bfd); i++)
12347 {
12348 Elf_Internal_Shdr *hdr
12349 = elf_elfsections (output_bfd)[i];
12350 if (hdr->sh_type == type)
12351 {
12352 if (dyn.d_tag == DT_RELSZ
12353 || dyn.d_tag == DT_RELASZ)
12354 dyn.d_un.d_val += hdr->sh_size;
12355 else if ((ufile_ptr) hdr->sh_offset
12356 <= dyn.d_un.d_val - 1)
12357 dyn.d_un.d_val = hdr->sh_offset;
12358 }
12359 }
12360 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12361 }
12362 break;
12363
12364 /* Set the bottom bit of DT_INIT/FINI if the
12365 corresponding function is Thumb. */
12366 case DT_INIT:
12367 name = info->init_function;
12368 goto get_sym;
12369 case DT_FINI:
12370 name = info->fini_function;
12371 get_sym:
12372 /* If it wasn't set by elf_bfd_final_link
12373 then there is nothing to adjust. */
12374 if (dyn.d_un.d_val != 0)
12375 {
12376 struct elf_link_hash_entry * eh;
12377
12378 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12379 FALSE, FALSE, TRUE);
12380 if (eh != NULL
12381 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12382 {
12383 dyn.d_un.d_val |= 1;
12384 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12385 }
12386 }
12387 break;
12388 }
12389 }
12390
12391 /* Fill in the first entry in the procedure linkage table. */
12392 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12393 {
12394 const bfd_vma *plt0_entry;
12395 bfd_vma got_address, plt_address, got_displacement;
12396
12397 /* Calculate the addresses of the GOT and PLT. */
12398 got_address = sgot->output_section->vma + sgot->output_offset;
12399 plt_address = splt->output_section->vma + splt->output_offset;
12400
12401 if (htab->vxworks_p)
12402 {
12403 /* The VxWorks GOT is relocated by the dynamic linker.
12404 Therefore, we must emit relocations rather than simply
12405 computing the values now. */
12406 Elf_Internal_Rela rel;
12407
12408 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12409 put_arm_insn (htab, output_bfd, plt0_entry[0],
12410 splt->contents + 0);
12411 put_arm_insn (htab, output_bfd, plt0_entry[1],
12412 splt->contents + 4);
12413 put_arm_insn (htab, output_bfd, plt0_entry[2],
12414 splt->contents + 8);
12415 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12416
12417 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12418 rel.r_offset = plt_address + 12;
12419 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12420 rel.r_addend = 0;
12421 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12422 htab->srelplt2->contents);
12423 }
12424 else
12425 {
12426 got_displacement = got_address - (plt_address + 16);
12427
12428 plt0_entry = elf32_arm_plt0_entry;
12429 put_arm_insn (htab, output_bfd, plt0_entry[0],
12430 splt->contents + 0);
12431 put_arm_insn (htab, output_bfd, plt0_entry[1],
12432 splt->contents + 4);
12433 put_arm_insn (htab, output_bfd, plt0_entry[2],
12434 splt->contents + 8);
12435 put_arm_insn (htab, output_bfd, plt0_entry[3],
12436 splt->contents + 12);
12437
12438 #ifdef FOUR_WORD_PLT
12439 /* The displacement value goes in the otherwise-unused
12440 last word of the second entry. */
12441 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12442 #else
12443 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12444 #endif
12445 }
12446 }
12447
12448 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12449 really seem like the right value. */
12450 if (splt->output_section->owner == output_bfd)
12451 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12452
12453 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12454 {
12455 /* Correct the .rel(a).plt.unloaded relocations. They will have
12456 incorrect symbol indexes. */
12457 int num_plts;
12458 unsigned char *p;
12459
12460 num_plts = ((htab->splt->size - htab->plt_header_size)
12461 / htab->plt_entry_size);
12462 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12463
12464 for (; num_plts; num_plts--)
12465 {
12466 Elf_Internal_Rela rel;
12467
12468 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12469 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12470 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12471 p += RELOC_SIZE (htab);
12472
12473 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12474 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12475 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12476 p += RELOC_SIZE (htab);
12477 }
12478 }
12479 }
12480
12481 /* Fill in the first three entries in the global offset table. */
12482 if (sgot)
12483 {
12484 if (sgot->size > 0)
12485 {
12486 if (sdyn == NULL)
12487 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12488 else
12489 bfd_put_32 (output_bfd,
12490 sdyn->output_section->vma + sdyn->output_offset,
12491 sgot->contents);
12492 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12493 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12494 }
12495
12496 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12497 }
12498
12499 return TRUE;
12500 }
12501
12502 static void
12503 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12504 {
12505 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12506 struct elf32_arm_link_hash_table *globals;
12507
12508 i_ehdrp = elf_elfheader (abfd);
12509
12510 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12511 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12512 else
12513 i_ehdrp->e_ident[EI_OSABI] = 0;
12514 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12515
12516 if (link_info)
12517 {
12518 globals = elf32_arm_hash_table (link_info);
12519 if (globals->byteswap_code)
12520 i_ehdrp->e_flags |= EF_ARM_BE8;
12521 }
12522 }
12523
12524 static enum elf_reloc_type_class
12525 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12526 {
12527 switch ((int) ELF32_R_TYPE (rela->r_info))
12528 {
12529 case R_ARM_RELATIVE:
12530 return reloc_class_relative;
12531 case R_ARM_JUMP_SLOT:
12532 return reloc_class_plt;
12533 case R_ARM_COPY:
12534 return reloc_class_copy;
12535 default:
12536 return reloc_class_normal;
12537 }
12538 }
12539
/* Mark SHT_NOTE sections link-once with same-contents checking so that
   duplicate notes are merged rather than repeated in the output.  (The
   previous comment here, about setting the machine number, described a
   different function.)  */

static bfd_boolean
elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
{
  if (hdr->sh_type == SHT_NOTE)
    *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;

  return TRUE;
}
12550
/* Final-write hook: refresh the contents of any ARM_NOTE_SECTION note
   sections in ABFD (via bfd_arm_update_notes) just before the file is
   written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
12556
12557 /* Return TRUE if this is an unwinding table entry. */
12558
12559 static bfd_boolean
12560 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12561 {
12562 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12563 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12564 }
12565
12566
12567 /* Set the type and flags for an ARM section. We do this by
12568 the section name, which is a hack, but ought to work. */
12569
12570 static bfd_boolean
12571 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12572 {
12573 const char * name;
12574
12575 name = bfd_get_section_name (abfd, sec);
12576
12577 if (is_arm_elf_unwind_section_name (abfd, name))
12578 {
12579 hdr->sh_type = SHT_ARM_EXIDX;
12580 hdr->sh_flags |= SHF_LINK_ORDER;
12581 }
12582 return TRUE;
12583 }
12584
12585 /* Handle an ARM specific section when reading an object file. This is
12586 called when bfd_section_from_shdr finds a section with an unknown
12587 type. */
12588
12589 static bfd_boolean
12590 elf32_arm_section_from_shdr (bfd *abfd,
12591 Elf_Internal_Shdr * hdr,
12592 const char *name,
12593 int shindex)
12594 {
12595 /* There ought to be a place to keep ELF backend specific flags, but
12596 at the moment there isn't one. We just keep track of the
12597 sections by their name, instead. Fortunately, the ABI gives
12598 names for all the ARM specific sections, so we will probably get
12599 away with this. */
12600 switch (hdr->sh_type)
12601 {
12602 case SHT_ARM_EXIDX:
12603 case SHT_ARM_PREEMPTMAP:
12604 case SHT_ARM_ATTRIBUTES:
12605 break;
12606
12607 default:
12608 return FALSE;
12609 }
12610
12611 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12612 return FALSE;
12613
12614 return TRUE;
12615 }
12616
/* A structure used to record a list of sections, independently
   of the next and prev fields in the asection structure.  */
typedef struct section_list
{
  asection * sec;		/* The section recorded by this node.  */
  struct section_list * next;	/* Next node, or NULL at the tail.  */
  struct section_list * prev;	/* Previous node, or NULL at the head.  */
}
section_list;
12626
/* Unfortunately we need to keep a list of sections for which
   an _arm_elf_section_data structure has been allocated.  This
   is because it is possible for functions like elf32_arm_write_section
   to be called on a section which has had an elf_data_structure
   allocated for it (and so the used_by_bfd field is valid) but
   for which the ARM extended version of this structure - the
   _arm_elf_section_data structure - has not been allocated.
   This is the head of that (doubly linked) list; new entries are
   pushed at the head by record_section_with_arm_elf_section_data.  */
static section_list * sections_with_arm_elf_section_data = NULL;
12635
12636 static void
12637 record_section_with_arm_elf_section_data (asection * sec)
12638 {
12639 struct section_list * entry;
12640
12641 entry = bfd_malloc (sizeof (* entry));
12642 if (entry == NULL)
12643 return;
12644 entry->sec = sec;
12645 entry->next = sections_with_arm_elf_section_data;
12646 entry->prev = NULL;
12647 if (entry->next != NULL)
12648 entry->next->prev = entry;
12649 sections_with_arm_elf_section_data = entry;
12650 }
12651
/* Return the list node recording SEC, or NULL if SEC was never
   recorded.  Keeps a static one-entry cache between calls, so this
   is not reentrant or thread-safe.  */

static struct section_list *
find_arm_elf_section_entry (asection * sec)
{
  struct section_list * entry;
  static struct section_list * last_entry = NULL;

  /* This is a short cut for the typical case where the sections are added
     to the sections_with_arm_elf_section_data list in forward order and
     then looked up here in backwards order.  This makes a real difference
     to the ld-srec/sec64k.exp linker test.  */
  entry = sections_with_arm_elf_section_data;
  if (last_entry != NULL)
    {
      /* Start the scan at the cached node (or its successor) when it
	 already matches, instead of walking from the list head.  */
      if (last_entry->sec == sec)
	entry = last_entry;
      else if (last_entry->next != NULL
	       && last_entry->next->sec == sec)
	entry = last_entry->next;
    }

  for (; entry; entry = entry->next)
    if (entry->sec == sec)
      break;

  if (entry)
    /* Record the entry prior to this one - it is the entry we are most
       likely to want to locate next time.  Also this way if we have been
       called from unrecord_section_with_arm_elf_section_data() we will not
       be caching a pointer that is about to be freed.  */
    last_entry = entry->prev;

  return entry;
}
12685
12686 static _arm_elf_section_data *
12687 get_arm_elf_section_data (asection * sec)
12688 {
12689 struct section_list * entry;
12690
12691 entry = find_arm_elf_section_entry (sec);
12692
12693 if (entry)
12694 return elf32_arm_section_data (entry->sec);
12695 else
12696 return NULL;
12697 }
12698
12699 static void
12700 unrecord_section_with_arm_elf_section_data (asection * sec)
12701 {
12702 struct section_list * entry;
12703
12704 entry = find_arm_elf_section_entry (sec);
12705
12706 if (entry)
12707 {
12708 if (entry->prev != NULL)
12709 entry->prev->next = entry->next;
12710 if (entry->next != NULL)
12711 entry->next->prev = entry->prev;
12712 if (entry == sections_with_arm_elf_section_data)
12713 sections_with_arm_elf_section_data = entry->next;
12714 free (entry);
12715 }
12716 }
12717
12718
/* Context threaded through the callbacks which emit linker-generated
   local symbols (mapping symbols and stub symbols).  */
typedef struct
{
  void *finfo;			/* Opaque data passed through to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index for SEC.  */
  /* Callback used to emit each symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
12728
/* The three kinds of ARM mapping symbol.  The values index the NAMES
   table in elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,			/* "$a": start of a run of ARM code.  */
  ARM_MAP_THUMB,		/* "$t": start of a run of Thumb code.  */
  ARM_MAP_DATA			/* "$d": start of a run of literal data.  */
};
12735
12736
12737 /* Output a single mapping symbol. */
12738
12739 static bfd_boolean
12740 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12741 enum map_symbol_type type,
12742 bfd_vma offset)
12743 {
12744 static const char *names[3] = {"$a", "$t", "$d"};
12745 struct elf32_arm_link_hash_table *htab;
12746 Elf_Internal_Sym sym;
12747
12748 htab = elf32_arm_hash_table (osi->info);
12749 sym.st_value = osi->sec->output_section->vma
12750 + osi->sec->output_offset
12751 + offset;
12752 sym.st_size = 0;
12753 sym.st_other = 0;
12754 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12755 sym.st_shndx = osi->sec_shndx;
12756 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12757 }
12758
12759
/* Output mapping symbols for PLT entries associated with H.  Called
   via elf_link_hash_traverse.  The offsets used below must stay in
   sync with the PLT entry layouts generated elsewhere in this file
   for each target flavour (Symbian, VxWorks, generic).  */

static bfd_boolean
elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
{
  output_arch_syminfo *osi = (output_arch_syminfo *) inf;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  bfd_vma addr;

  htab = elf32_arm_hash_table (osi->info);

  /* Indirect symbols are aliases; the real symbol is visited
     separately.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  if (h->root.type == bfd_link_hash_warning)
    /* When warning symbols are created, they **replace** the "real"
       entry in the hash table, thus we never get to see the real
       symbol in a hash traversal.  So look at it now.  */
    h = (struct elf_link_hash_entry *) h->root.u.i.link;

  /* No PLT entry for this symbol: nothing to map.  */
  if (h->plt.offset == (bfd_vma) -1)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;
  addr = h->plt.offset;
  if (htab->symbian_p)
    {
      /* Symbian PLT entries: ARM code followed by a data word.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      /* VxWorks PLT entries interleave ARM code and literal data at
	 these fixed offsets.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else
    {
      bfd_signed_vma thumb_refs;

      /* Count Thumb references; without BLX support the "maybe Thumb"
	 references also need a Thumb thunk before the entry.  */
      thumb_refs = eh->plt_thumb_refcount;
      if (!htab->use_blx)
	thumb_refs += eh->plt_maybe_thumb_refcount;

      if (thumb_refs > 0)
	{
	  /* The Thumb thunk sits 4 bytes before the ARM entry.  */
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_refs > 0 || addr == 20)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
12836
12837 /* Output a single local symbol for a generated stub. */
12838
12839 static bfd_boolean
12840 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12841 bfd_vma offset, bfd_vma size)
12842 {
12843 struct elf32_arm_link_hash_table *htab;
12844 Elf_Internal_Sym sym;
12845
12846 htab = elf32_arm_hash_table (osi->info);
12847 sym.st_value = osi->sec->output_section->vma
12848 + osi->sec->output_offset
12849 + offset;
12850 sym.st_size = size;
12851 sym.st_other = 0;
12852 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12853 sym.st_shndx = osi->sec_shndx;
12854 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12855 }
12856
12857 static bfd_boolean
12858 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12859 void * in_arg)
12860 {
12861 struct elf32_arm_stub_hash_entry *stub_entry;
12862 struct bfd_link_info *info;
12863 struct elf32_arm_link_hash_table *htab;
12864 asection *stub_sec;
12865 bfd_vma addr;
12866 char *stub_name;
12867 output_arch_syminfo *osi;
12868 const insn_sequence *template;
12869 enum stub_insn_type prev_type;
12870 int size;
12871 int i;
12872 enum map_symbol_type sym_type;
12873
12874 /* Massage our args to the form they really have. */
12875 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12876 osi = (output_arch_syminfo *) in_arg;
12877
12878 info = osi->info;
12879
12880 htab = elf32_arm_hash_table (info);
12881 stub_sec = stub_entry->stub_sec;
12882
12883 /* Ensure this stub is attached to the current section being
12884 processed. */
12885 if (stub_sec != osi->sec)
12886 return TRUE;
12887
12888 addr = (bfd_vma) stub_entry->stub_offset;
12889 stub_name = stub_entry->output_name;
12890
12891 template = stub_entry->stub_template;
12892 switch (template[0].type)
12893 {
12894 case ARM_TYPE:
12895 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12896 return FALSE;
12897 break;
12898 case THUMB16_TYPE:
12899 case THUMB32_TYPE:
12900 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12901 stub_entry->stub_size))
12902 return FALSE;
12903 break;
12904 default:
12905 BFD_FAIL ();
12906 return 0;
12907 }
12908
12909 prev_type = DATA_TYPE;
12910 size = 0;
12911 for (i = 0; i < stub_entry->stub_template_size; i++)
12912 {
12913 switch (template[i].type)
12914 {
12915 case ARM_TYPE:
12916 sym_type = ARM_MAP_ARM;
12917 break;
12918
12919 case THUMB16_TYPE:
12920 case THUMB32_TYPE:
12921 sym_type = ARM_MAP_THUMB;
12922 break;
12923
12924 case DATA_TYPE:
12925 sym_type = ARM_MAP_DATA;
12926 break;
12927
12928 default:
12929 BFD_FAIL ();
12930 return FALSE;
12931 }
12932
12933 if (template[i].type != prev_type)
12934 {
12935 prev_type = template[i].type;
12936 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12937 return FALSE;
12938 }
12939
12940 switch (template[i].type)
12941 {
12942 case ARM_TYPE:
12943 case THUMB32_TYPE:
12944 size += 4;
12945 break;
12946
12947 case THUMB16_TYPE:
12948 size += 2;
12949 break;
12950
12951 case DATA_TYPE:
12952 size += 4;
12953 break;
12954
12955 default:
12956 BFD_FAIL ();
12957 return FALSE;
12958 }
12959 }
12960
12961 return TRUE;
12962 }
12963
/* Output mapping symbols for all linker generated sections: the
   ARM<->Thumb interworking glue, ARMv4 BX veneers, long-branch stub
   sections and the PLT.  FUNC is the callback used to emit each
   symbol.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *finfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;

  htab = elf32_arm_hash_table (info);
  check_use_blx (htab);

  osi.finfo = finfo;
  osi.info = info;
  osi.func = func;

  /* ARM->Thumb glue.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
					 ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	  (output_bfd, osi.sec->output_section);
      /* The per-veneer size depends on which flavour of glue was
	 emitted; these must match the glue generation code.  */
      if (info->shared || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      /* Each veneer is ARM code with one trailing data word.  */
      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
					 THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	  (output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      /* Each veneer starts with Thumb code and switches to ARM code
	 4 bytes in.  */
      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
					 ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	  (output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* arm_map_one_stub filters on osi.sec, so each traversal
	     only emits symbols for stubs in this section.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (!htab->splt || htab->splt->size == 0)
    return TRUE;

  osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
      htab->splt->output_section);
  osi.sec = htab->splt;
  /* Output mapping symbols for the plt header.  SymbianOS does not have a
     plt header.  */
  if (htab->vxworks_p)
    {
      /* VxWorks shared libraries have no PLT header.  */
      if (!info->shared)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	}
    }
  else if (!htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
#ifndef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	return FALSE;
#endif
    }

  /* Emit mapping symbols for every PLT entry.  */
  elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
  return TRUE;
}
13094
13095 /* Allocate target specific section data. */
13096
13097 static bfd_boolean
13098 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13099 {
13100 if (!sec->used_by_bfd)
13101 {
13102 _arm_elf_section_data *sdata;
13103 bfd_size_type amt = sizeof (*sdata);
13104
13105 sdata = bfd_zalloc (abfd, amt);
13106 if (sdata == NULL)
13107 return FALSE;
13108 sec->used_by_bfd = sdata;
13109 }
13110
13111 record_section_with_arm_elf_section_data (sec);
13112
13113 return _bfd_elf_new_section_hook (abfd, sec);
13114 }
13115
13116
13117 /* Used to order a list of mapping symbols by address. */
13118
13119 static int
13120 elf32_arm_compare_mapping (const void * a, const void * b)
13121 {
13122 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13123 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13124
13125 if (amap->vma > bmap->vma)
13126 return 1;
13127 else if (amap->vma < bmap->vma)
13128 return -1;
13129 else if (amap->type > bmap->type)
13130 /* Ensure results do not depend on the host qsort for objects with
13131 multiple mapping symbols at the same address by sorting on type
13132 after vma. */
13133 return 1;
13134 else if (amap->type < bmap->type)
13135 return -1;
13136 else
13137 return 0;
13138 }
13139
13140 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13141
13142 static unsigned long
13143 offset_prel31 (unsigned long addr, bfd_vma offset)
13144 {
13145 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13146 }
13147
/* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
   relocations.  */

static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* High bit of first word is supposed to be zero.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is
     not 0x1 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab
     entry.  (A previous version of this comment said "first word".)  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
13169
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data {
  asection *writing_section;	/* The section whose contents are being fixed.  */
  bfd_byte *contents;		/* In-memory copy of that section's bytes.  */
};
13176
13177
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse;
   IN_ARG identifies the section being written and its contents.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int index;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only Cortex-A8 veneer stubs targeting the section being written
     are of interest here.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
    return TRUE;

  contents = data->contents;

  /* Address of the instruction being veneered ...  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->target_value;

  /* ... and of the stub it will branch to.  */
  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* BLX targets are 4-byte aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  index = stub_entry->target_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
			       "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Choose a Thumb-2 branch opcode skeleton; the offset bits are
     filled in at the jump24 label below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	/* The 24-bit branch range is +/-16MB (even offsets only).  */
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
				     "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Overwrite the original instruction with the two halfwords of the
     branch to the stub.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);

  return TRUE;
}
13283
13284 /* Do code byteswapping. Return FALSE afterwards so that the section is
13285 written out as normal. */
13286
13287 static bfd_boolean
13288 elf32_arm_write_section (bfd *output_bfd,
13289 struct bfd_link_info *link_info,
13290 asection *sec,
13291 bfd_byte *contents)
13292 {
13293 unsigned int mapcount, errcount;
13294 _arm_elf_section_data *arm_data;
13295 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13296 elf32_arm_section_map *map;
13297 elf32_vfp11_erratum_list *errnode;
13298 bfd_vma ptr;
13299 bfd_vma end;
13300 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13301 bfd_byte tmp;
13302 unsigned int i;
13303
13304 /* If this section has not been allocated an _arm_elf_section_data
13305 structure then we cannot record anything. */
13306 arm_data = get_arm_elf_section_data (sec);
13307 if (arm_data == NULL)
13308 return FALSE;
13309
13310 mapcount = arm_data->mapcount;
13311 map = arm_data->map;
13312 errcount = arm_data->erratumcount;
13313
13314 if (errcount != 0)
13315 {
13316 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13317
13318 for (errnode = arm_data->erratumlist; errnode != 0;
13319 errnode = errnode->next)
13320 {
13321 bfd_vma index = errnode->vma - offset;
13322
13323 switch (errnode->type)
13324 {
13325 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13326 {
13327 bfd_vma branch_to_veneer;
13328 /* Original condition code of instruction, plus bit mask for
13329 ARM B instruction. */
13330 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13331 | 0x0a000000;
13332
13333 /* The instruction is before the label. */
13334 index -= 4;
13335
13336 /* Above offset included in -4 below. */
13337 branch_to_veneer = errnode->u.b.veneer->vma
13338 - errnode->vma - 4;
13339
13340 if ((signed) branch_to_veneer < -(1 << 25)
13341 || (signed) branch_to_veneer >= (1 << 25))
13342 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13343 "range"), output_bfd);
13344
13345 insn |= (branch_to_veneer >> 2) & 0xffffff;
13346 contents[endianflip ^ index] = insn & 0xff;
13347 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13348 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13349 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13350 }
13351 break;
13352
13353 case VFP11_ERRATUM_ARM_VENEER:
13354 {
13355 bfd_vma branch_from_veneer;
13356 unsigned int insn;
13357
13358 /* Take size of veneer into account. */
13359 branch_from_veneer = errnode->u.v.branch->vma
13360 - errnode->vma - 12;
13361
13362 if ((signed) branch_from_veneer < -(1 << 25)
13363 || (signed) branch_from_veneer >= (1 << 25))
13364 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13365 "range"), output_bfd);
13366
13367 /* Original instruction. */
13368 insn = errnode->u.v.branch->u.b.vfp_insn;
13369 contents[endianflip ^ index] = insn & 0xff;
13370 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13371 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13372 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13373
13374 /* Branch back to insn after original insn. */
13375 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13376 contents[endianflip ^ (index + 4)] = insn & 0xff;
13377 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13378 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13379 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13380 }
13381 break;
13382
13383 default:
13384 abort ();
13385 }
13386 }
13387 }
13388
13389 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13390 {
13391 arm_unwind_table_edit *edit_node
13392 = arm_data->u.exidx.unwind_edit_list;
13393 /* Now, sec->size is the size of the section we will write. The original
13394 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13395 markers) was sec->rawsize. (This isn't the case if we perform no
13396 edits, then rawsize will be zero and we should use size). */
13397 bfd_byte *edited_contents = bfd_malloc (sec->size);
13398 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13399 unsigned int in_index, out_index;
13400 bfd_vma add_to_offsets = 0;
13401
13402 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13403 {
13404 if (edit_node)
13405 {
13406 unsigned int edit_index = edit_node->index;
13407
13408 if (in_index < edit_index && in_index * 8 < input_size)
13409 {
13410 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13411 contents + in_index * 8, add_to_offsets);
13412 out_index++;
13413 in_index++;
13414 }
13415 else if (in_index == edit_index
13416 || (in_index * 8 >= input_size
13417 && edit_index == UINT_MAX))
13418 {
13419 switch (edit_node->type)
13420 {
13421 case DELETE_EXIDX_ENTRY:
13422 in_index++;
13423 add_to_offsets += 8;
13424 break;
13425
13426 case INSERT_EXIDX_CANTUNWIND_AT_END:
13427 {
13428 asection *text_sec = edit_node->linked_section;
13429 bfd_vma text_offset = text_sec->output_section->vma
13430 + text_sec->output_offset
13431 + text_sec->size;
13432 bfd_vma exidx_offset = offset + out_index * 8;
13433 unsigned long prel31_offset;
13434
13435 /* Note: this is meant to be equivalent to an
13436 R_ARM_PREL31 relocation. These synthetic
13437 EXIDX_CANTUNWIND markers are not relocated by the
13438 usual BFD method. */
13439 prel31_offset = (text_offset - exidx_offset)
13440 & 0x7ffffffful;
13441
13442 /* First address we can't unwind. */
13443 bfd_put_32 (output_bfd, prel31_offset,
13444 &edited_contents[out_index * 8]);
13445
13446 /* Code for EXIDX_CANTUNWIND. */
13447 bfd_put_32 (output_bfd, 0x1,
13448 &edited_contents[out_index * 8 + 4]);
13449
13450 out_index++;
13451 add_to_offsets -= 8;
13452 }
13453 break;
13454 }
13455
13456 edit_node = edit_node->next;
13457 }
13458 }
13459 else
13460 {
13461 /* No more edits, copy remaining entries verbatim. */
13462 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13463 contents + in_index * 8, add_to_offsets);
13464 out_index++;
13465 in_index++;
13466 }
13467 }
13468
13469 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13470 bfd_set_section_contents (output_bfd, sec->output_section,
13471 edited_contents,
13472 (file_ptr) sec->output_offset, sec->size);
13473
13474 return TRUE;
13475 }
13476
13477 /* Fix code to point to Cortex-A8 erratum stubs. */
13478 if (globals->fix_cortex_a8)
13479 {
13480 struct a8_branch_to_stub_data data;
13481
13482 data.writing_section = sec;
13483 data.contents = contents;
13484
13485 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13486 &data);
13487 }
13488
13489 if (mapcount == 0)
13490 return FALSE;
13491
13492 if (globals->byteswap_code)
13493 {
13494 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13495
13496 ptr = map[0].vma;
13497 for (i = 0; i < mapcount; i++)
13498 {
13499 if (i == mapcount - 1)
13500 end = sec->size;
13501 else
13502 end = map[i + 1].vma;
13503
13504 switch (map[i].type)
13505 {
13506 case 'a':
13507 /* Byte swap code words. */
13508 while (ptr + 3 < end)
13509 {
13510 tmp = contents[ptr];
13511 contents[ptr] = contents[ptr + 3];
13512 contents[ptr + 3] = tmp;
13513 tmp = contents[ptr + 1];
13514 contents[ptr + 1] = contents[ptr + 2];
13515 contents[ptr + 2] = tmp;
13516 ptr += 4;
13517 }
13518 break;
13519
13520 case 't':
13521 /* Byte swap code halfwords. */
13522 while (ptr + 1 < end)
13523 {
13524 tmp = contents[ptr];
13525 contents[ptr] = contents[ptr + 1];
13526 contents[ptr + 1] = tmp;
13527 ptr += 2;
13528 }
13529 break;
13530
13531 case 'd':
13532 /* Leave data alone. */
13533 break;
13534 }
13535 ptr = end;
13536 }
13537 }
13538
13539 free (map);
13540 arm_data->mapcount = 0;
13541 arm_data->mapsize = 0;
13542 arm_data->map = NULL;
13543 unrecord_section_with_arm_elf_section_data (sec);
13544
13545 return FALSE;
13546 }
13547
13548 static void
13549 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13550 asection * sec,
13551 void * ignore ATTRIBUTE_UNUSED)
13552 {
13553 unrecord_section_with_arm_elf_section_data (sec);
13554 }
13555
13556 static bfd_boolean
13557 elf32_arm_close_and_cleanup (bfd * abfd)
13558 {
13559 if (abfd->sections)
13560 bfd_map_over_sections (abfd,
13561 unrecord_section_via_map_over_sections,
13562 NULL);
13563
13564 return _bfd_elf_close_and_cleanup (abfd);
13565 }
13566
13567 static bfd_boolean
13568 elf32_arm_bfd_free_cached_info (bfd * abfd)
13569 {
13570 if (abfd->sections)
13571 bfd_map_over_sections (abfd,
13572 unrecord_section_via_map_over_sections,
13573 NULL);
13574
13575 return _bfd_free_cached_info (abfd);
13576 }
13577
13578 /* Display STT_ARM_TFUNC symbols as functions. */
13579
13580 static void
13581 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13582 asymbol *asym)
13583 {
13584 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13585
13586 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13587 elfsym->symbol.flags |= BSF_FUNCTION;
13588 }
13589
13590
13591 /* Mangle thumb function symbols as we read them in. */
13592
13593 static bfd_boolean
13594 elf32_arm_swap_symbol_in (bfd * abfd,
13595 const void *psrc,
13596 const void *pshn,
13597 Elf_Internal_Sym *dst)
13598 {
13599 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13600 return FALSE;
13601
13602 /* New EABI objects mark thumb function symbols by setting the low bit of
13603 the address. Turn these into STT_ARM_TFUNC. */
13604 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13605 && (dst->st_value & 1))
13606 {
13607 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13608 dst->st_value &= ~(bfd_vma) 1;
13609 }
13610 return TRUE;
13611 }
13612
13613
13614 /* Mangle thumb function symbols as we write them out. */
13615
13616 static void
13617 elf32_arm_swap_symbol_out (bfd *abfd,
13618 const Elf_Internal_Sym *src,
13619 void *cdst,
13620 void *shndx)
13621 {
13622 Elf_Internal_Sym newsym;
13623
13624 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13625 of the address set, as per the new EABI. We do this unconditionally
13626 because objcopy does not set the elf header flags until after
13627 it writes out the symbol table. */
13628 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13629 {
13630 newsym = *src;
13631 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13632 if (newsym.st_shndx != SHN_UNDEF)
13633 {
13634 /* Do this only for defined symbols. At link type, the static
13635 linker will simulate the work of dynamic linker of resolving
13636 symbols and will carry over the thumbness of found symbols to
13637 the output symbol table. It's not clear how it happens, but
13638 the thumbness of undefined symbols can well be different at
13639 runtime, and writing '1' for them will be confusing for users
13640 and possibly for dynamic linker itself.
13641 */
13642 newsym.st_value |= 1;
13643 }
13644
13645 src = &newsym;
13646 }
13647 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13648 }
13649
13650 /* Add the PT_ARM_EXIDX program header. */
13651
13652 static bfd_boolean
13653 elf32_arm_modify_segment_map (bfd *abfd,
13654 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13655 {
13656 struct elf_segment_map *m;
13657 asection *sec;
13658
13659 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13660 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13661 {
13662 /* If there is already a PT_ARM_EXIDX header, then we do not
13663 want to add another one. This situation arises when running
13664 "strip"; the input binary already has the header. */
13665 m = elf_tdata (abfd)->segment_map;
13666 while (m && m->p_type != PT_ARM_EXIDX)
13667 m = m->next;
13668 if (!m)
13669 {
13670 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13671 if (m == NULL)
13672 return FALSE;
13673 m->p_type = PT_ARM_EXIDX;
13674 m->count = 1;
13675 m->sections[0] = sec;
13676
13677 m->next = elf_tdata (abfd)->segment_map;
13678 elf_tdata (abfd)->segment_map = m;
13679 }
13680 }
13681
13682 return TRUE;
13683 }
13684
13685 /* We may add a PT_ARM_EXIDX program header. */
13686
13687 static int
13688 elf32_arm_additional_program_headers (bfd *abfd,
13689 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13690 {
13691 asection *sec;
13692
13693 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13694 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13695 return 1;
13696 else
13697 return 0;
13698 }
13699
13700 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13701
13702 static bfd_boolean
13703 elf32_arm_is_function_type (unsigned int type)
13704 {
13705 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13706 }
13707
/* We use this to override swap_symbol_in and swap_symbol_out.
   NOTE(review): the leading scalar fields follow the positional
   layout of struct elf_size_info in elf-bfd.h — confirm against that
   header before reordering anything.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash-table entry size.  */
  1,		/* Internal relocations per external relocation.  */
  32, 2,	/* Arch size; log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM override: tags Thumb functions.  */
  elf32_arm_swap_symbol_out,	/* ARM override: re-encodes Thumb bit.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
13738
/* Parameters and hook overrides for the default ARM ELF targets,
   consumed by the elf32-target.h template included below.  */

#define ELF_ARCH			bfd_arch_arm
#define ELF_MACHINE_CODE		EM_ARM
/* QNX targets use a 4K maximum page size; others use 32K.  */
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x8000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		elf32_arm_mkobject

/* Overrides of the generic bfd_elf32_* entry points.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_free	elf32_arm_hash_table_free
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_close_and_cleanup		elf32_arm_close_and_cleanup
#define bfd_elf32_bfd_free_cached_info		elf32_arm_bfd_free_cached_info
#define bfd_elf32_bfd_final_link		elf32_arm_final_link

/* ELF backend hooks.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_gc_sweep_hook		elf32_arm_gc_sweep_hook
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_section_flags		elf32_arm_section_flags
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_symbol_processing		elf32_arm_symbol_processing
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_is_function_type		elf32_arm_is_function_type

/* Backend capabilities: GC-able sections, read-only PLT with a
   .got.plt, and REL (not RELA) relocations by default.  */
#define elf_backend_can_refcount		1
#define elf_backend_can_gc_sections		1
#define elf_backend_plt_readonly		1
#define elf_backend_want_got_plt		1
#define elf_backend_want_plt_sym		0
#define elf_backend_may_use_rel_p		1
#define elf_backend_may_use_rela_p		0
#define elf_backend_default_use_rela_p		0

#define elf_backend_got_header_size		12

/* ARM EABI build attributes live in .ARM.attributes with vendor
   name "aeabi".  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order

#include "elf32-target.h"
13818
/* VxWorks Targets.  */

/* Target vector names and BFD target strings for the VxWorks
   flavours of the ARM ELF backend.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		bfd_elf32_littlearm_vxworks_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			bfd_elf32_bigarm_vxworks_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
13829
13830 /* Like elf32_arm_link_hash_table_create -- but overrides
13831 appropriately for VxWorks. */
13832
13833 static struct bfd_link_hash_table *
13834 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13835 {
13836 struct bfd_link_hash_table *ret;
13837
13838 ret = elf32_arm_link_hash_table_create (abfd);
13839 if (ret)
13840 {
13841 struct elf32_arm_link_hash_table *htab
13842 = (struct elf32_arm_link_hash_table *) ret;
13843 htab->use_rel = 0;
13844 htab->vxworks_p = 1;
13845 }
13846 return ret;
13847 }
13848
13849 static void
13850 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13851 {
13852 elf32_arm_final_write_processing (abfd, linker);
13853 elf_vxworks_final_write_processing (abfd, linker);
13854 }
13855
#undef	elf32_bed
#define elf32_bed			elf32_arm_vxworks_bed

/* VxWorks-specific overrides of the backend hooks.  */
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_add_symbol_hook
#define elf_backend_add_symbol_hook	elf_vxworks_add_symbol_hook
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs		elf_vxworks_emit_relocs

/* Unlike the default ARM targets, VxWorks uses RELA relocations,
   wants a PLT symbol, and uses a 4K maximum page size.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

#include "elf32-target.h"
13880
13881
/* Symbian OS Targets.  */

/* Target vector names and BFD target strings for the Symbian OS
   flavours of the ARM ELF backend.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		bfd_elf32_littlearm_symbian_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			bfd_elf32_bigarm_symbian_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
13892
13893 /* Like elf32_arm_link_hash_table_create -- but overrides
13894 appropriately for Symbian OS. */
13895
13896 static struct bfd_link_hash_table *
13897 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13898 {
13899 struct bfd_link_hash_table *ret;
13900
13901 ret = elf32_arm_link_hash_table_create (abfd);
13902 if (ret)
13903 {
13904 struct elf32_arm_link_hash_table *htab
13905 = (struct elf32_arm_link_hash_table *)ret;
13906 /* There is no PLT header for Symbian OS. */
13907 htab->plt_header_size = 0;
13908 /* The PLT entries are each one instruction and one word. */
13909 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13910 htab->symbian_p = 1;
13911 /* Symbian uses armv5t or above, so use_blx is always true. */
13912 htab->use_blx = 1;
13913 htab->root.is_relocatable_executable = 1;
13914 }
13915 return ret;
13916 }
13917
/* Special-section table for Symbian OS targets; terminated by an
   all-zero sentinel entry.  */
static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
  { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
  { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Sentinel.  */
  { NULL, 0, 0, 0, 0 }
};
13938
13939 static void
13940 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13941 struct bfd_link_info *link_info)
13942 {
13943 /* BPABI objects are never loaded directly by an OS kernel; they are
13944 processed by a postlinker first, into an OS-specific format. If
13945 the D_PAGED bit is set on the file, BFD will align segments on
13946 page boundaries, so that an OS can directly map the file. With
13947 BPABI objects, that just results in wasted space. In addition,
13948 because we clear the D_PAGED bit, map_sections_to_segments will
13949 recognize that the program headers should not be mapped into any
13950 loadable segment. */
13951 abfd->flags &= ~D_PAGED;
13952 elf32_arm_begin_write_processing (abfd, link_info);
13953 }
13954
13955 static bfd_boolean
13956 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13957 struct bfd_link_info *info)
13958 {
13959 struct elf_segment_map *m;
13960 asection *dynsec;
13961
13962 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13963 segment. However, because the .dynamic section is not marked
13964 with SEC_LOAD, the generic ELF code will not create such a
13965 segment. */
13966 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13967 if (dynsec)
13968 {
13969 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13970 if (m->p_type == PT_DYNAMIC)
13971 break;
13972
13973 if (m == NULL)
13974 {
13975 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13976 m->next = elf_tdata (abfd)->segment_map;
13977 elf_tdata (abfd)->segment_map = m;
13978 }
13979 }
13980
13981 /* Also call the generic arm routine. */
13982 return elf32_arm_modify_segment_map (abfd, info);
13983 }
13984
13985 /* Return address for Ith PLT stub in section PLT, for relocation REL
13986 or (bfd_vma) -1 if it should not be included. */
13987
13988 static bfd_vma
13989 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13990 const arelent *rel ATTRIBUTE_UNUSED)
13991 {
13992 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13993 }
13994
13995
#undef	elf32_bed
#define elf32_bed			elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef	ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

/* Drop the VxWorks-specific hooks installed for the previous
   target.  */
#undef	elf_backend_add_symbol_hook
#undef	elf_backend_emit_relocs

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef	elf_backend_special_sections
#define elf_backend_special_sections	elf32_arm_symbian_special_sections
#undef	elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map	elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef	elf_backend_got_header_size
#define elf_backend_got_header_size	0

/* Similarly, there is no .got.plt section.  */
#undef	elf_backend_want_got_plt
#define elf_backend_want_got_plt	0

#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val

/* Symbian uses REL relocations and does not want PLT symbols.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	1
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	0
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	0
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	0
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x8000

#include "elf32-target.h"