1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31
32 /* Return the relocation section associated with NAME. HTAB is the
33 bfd's elf32_arm_link_hash_table. */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36
37 /* Return size of a relocation entry. HTAB is the bfd's
38 elf32_arm_link_hash_table. */
39 #define RELOC_SIZE(HTAB) \
40 ((HTAB)->use_rel \
41 ? sizeof (Elf32_External_Rel) \
42 : sizeof (Elf32_External_Rela))
43
44 /* Return function to swap relocations in. HTAB is the bfd's
45 elf32_arm_link_hash_table. */
46 #define SWAP_RELOC_IN(HTAB) \
47 ((HTAB)->use_rel \
48 ? bfd_elf32_swap_reloc_in \
49 : bfd_elf32_swap_reloca_in)
50
51 /* Return function to swap relocations out. HTAB is the bfd's
52 elf32_arm_link_hash_table. */
53 #define SWAP_RELOC_OUT(HTAB) \
54 ((HTAB)->use_rel \
55 ? bfd_elf32_swap_reloc_out \
56 : bfd_elf32_swap_reloca_out)
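/* Illustrative sketch only (kept out of the build with #if 0): how the
   macros above are typically combined when emitting one dynamic
   relocation.  The struct elf32_arm_link_hash_table type and the
   already-sized SRELOC section are assumptions here, defined elsewhere
   in this file rather than above.  */
#if 0
static void
example_emit_dyn_reloc (bfd *output_bfd,
                        struct elf32_arm_link_hash_table *htab,
                        asection *sreloc,
                        Elf_Internal_Rela *rel)
{
  /* Each entry occupies RELOC_SIZE (htab) bytes; reloc_count indexes
     the next free slot in the section contents.  */
  bfd_byte *loc = sreloc->contents
                  + sreloc->reloc_count++ * RELOC_SIZE (htab);

  /* Pick the REL or RELA swapper that matches the section's layout.  */
  SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
}
#endif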
57
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
60
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
63
64 static struct elf_backend_data elf32_arm_vxworks_bed;
65
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
68 asection *sec,
69 bfd_byte *contents);
70
71 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
73 in that slot. */
74
75 static reloc_howto_type elf32_arm_howto_table_1[] =
76 {
77 /* No relocation. */
78 HOWTO (R_ARM_NONE, /* type */
79 0, /* rightshift */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
81 0, /* bitsize */
82 FALSE, /* pc_relative */
83 0, /* bitpos */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
88 0, /* src_mask */
89 0, /* dst_mask */
90 FALSE), /* pcrel_offset */
91
92 HOWTO (R_ARM_PC24, /* type */
93 2, /* rightshift */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
95 24, /* bitsize */
96 TRUE, /* pc_relative */
97 0, /* bitpos */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
105
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
108 0, /* rightshift */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
110 32, /* bitsize */
111 FALSE, /* pc_relative */
112 0, /* bitpos */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
120
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
123 0, /* rightshift */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
125 32, /* bitsize */
126 TRUE, /* pc_relative */
127 0, /* bitpos */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
135
136 /* PC relative load/store (12-bit offset) - R_ARM_LDR_PC_G0 in AAELF, formerly R_ARM_PC13 */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
138 0, /* rightshift */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
140 32, /* bitsize */
141 TRUE, /* pc_relative */
142 0, /* bitpos */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
150
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
153 0, /* rightshift */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
155 16, /* bitsize */
156 FALSE, /* pc_relative */
157 0, /* bitpos */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
165
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
168 0, /* rightshift */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
170 12, /* bitsize */
171 FALSE, /* pc_relative */
172 0, /* bitpos */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
180
181 HOWTO (R_ARM_THM_ABS5, /* type */
182 6, /* rightshift */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
184 5, /* bitsize */
185 FALSE, /* pc_relative */
186 0, /* bitpos */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
194
195 /* 8 bit absolute */
196 HOWTO (R_ARM_ABS8, /* type */
197 0, /* rightshift */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
199 8, /* bitsize */
200 FALSE, /* pc_relative */
201 0, /* bitpos */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
209
210 HOWTO (R_ARM_SBREL32, /* type */
211 0, /* rightshift */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
213 32, /* bitsize */
214 FALSE, /* pc_relative */
215 0, /* bitpos */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
223
224 HOWTO (R_ARM_THM_CALL, /* type */
225 1, /* rightshift */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
227 25, /* bitsize */
228 TRUE, /* pc_relative */
229 0, /* bitpos */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff07ff, /* src_mask */
235 0x07ff07ff, /* dst_mask */
236 TRUE), /* pcrel_offset */
237
238 HOWTO (R_ARM_THM_PC8, /* type */
239 1, /* rightshift */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
241 8, /* bitsize */
242 TRUE, /* pc_relative */
243 0, /* bitpos */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
251
252 HOWTO (R_ARM_BREL_ADJ, /* type */
253 1, /* rightshift */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
255 32, /* bitsize */
256 FALSE, /* pc_relative */
257 0, /* bitpos */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
265
266 HOWTO (R_ARM_SWI24, /* type */
267 0, /* rightshift */
268 0, /* size (0 = byte, 1 = short, 2 = long) */
269 0, /* bitsize */
270 FALSE, /* pc_relative */
271 0, /* bitpos */
272 complain_overflow_signed,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_SWI24", /* name */
275 FALSE, /* partial_inplace */
276 0x00000000, /* src_mask */
277 0x00000000, /* dst_mask */
278 FALSE), /* pcrel_offset */
279
280 HOWTO (R_ARM_THM_SWI8, /* type */
281 0, /* rightshift */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
283 0, /* bitsize */
284 FALSE, /* pc_relative */
285 0, /* bitpos */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
293
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
296 2, /* rightshift */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
298 25, /* bitsize */
299 TRUE, /* pc_relative */
300 0, /* bitpos */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
308
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
311 2, /* rightshift */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
313 22, /* bitsize */
314 TRUE, /* pc_relative */
315 0, /* bitpos */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff07ff, /* src_mask */
321 0x07ff07ff, /* dst_mask */
322 TRUE), /* pcrel_offset */
323
324 /* Dynamic TLS relocations. */
325
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
327 0, /* rightshift */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
329 32, /* bitsize */
330 FALSE, /* pc_relative */
331 0, /* bitpos */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
339
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
341 0, /* rightshift */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
343 32, /* bitsize */
344 FALSE, /* pc_relative */
345 0, /* bitpos */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
353
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
355 0, /* rightshift */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
357 32, /* bitsize */
358 FALSE, /* pc_relative */
359 0, /* bitpos */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
367
368 /* Relocs used in ARM Linux */
369
370 HOWTO (R_ARM_COPY, /* type */
371 0, /* rightshift */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
373 32, /* bitsize */
374 FALSE, /* pc_relative */
375 0, /* bitpos */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
383
384 HOWTO (R_ARM_GLOB_DAT, /* type */
385 0, /* rightshift */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
387 32, /* bitsize */
388 FALSE, /* pc_relative */
389 0, /* bitpos */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
397
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
399 0, /* rightshift */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
401 32, /* bitsize */
402 FALSE, /* pc_relative */
403 0, /* bitpos */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
411
412 HOWTO (R_ARM_RELATIVE, /* type */
413 0, /* rightshift */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
415 32, /* bitsize */
416 FALSE, /* pc_relative */
417 0, /* bitpos */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
425
426 HOWTO (R_ARM_GOTOFF32, /* type */
427 0, /* rightshift */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
429 32, /* bitsize */
430 FALSE, /* pc_relative */
431 0, /* bitpos */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
439
440 HOWTO (R_ARM_GOTPC, /* type */
441 0, /* rightshift */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
443 32, /* bitsize */
444 TRUE, /* pc_relative */
445 0, /* bitpos */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
453
454 HOWTO (R_ARM_GOT32, /* type */
455 0, /* rightshift */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
457 32, /* bitsize */
458 FALSE, /* pc_relative */
459 0, /* bitpos */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
467
468 HOWTO (R_ARM_PLT32, /* type */
469 2, /* rightshift */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
471 24, /* bitsize */
472 TRUE, /* pc_relative */
473 0, /* bitpos */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
481
482 HOWTO (R_ARM_CALL, /* type */
483 2, /* rightshift */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
485 24, /* bitsize */
486 TRUE, /* pc_relative */
487 0, /* bitpos */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
495
496 HOWTO (R_ARM_JUMP24, /* type */
497 2, /* rightshift */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
499 24, /* bitsize */
500 TRUE, /* pc_relative */
501 0, /* bitpos */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
509
510 HOWTO (R_ARM_THM_JUMP24, /* type */
511 1, /* rightshift */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
513 24, /* bitsize */
514 TRUE, /* pc_relative */
515 0, /* bitpos */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
523
524 HOWTO (R_ARM_BASE_ABS, /* type */
525 0, /* rightshift */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
527 32, /* bitsize */
528 FALSE, /* pc_relative */
529 0, /* bitpos */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
537
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
539 0, /* rightshift */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
541 12, /* bitsize */
542 TRUE, /* pc_relative */
543 0, /* bitpos */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
551
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
553 0, /* rightshift */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
555 12, /* bitsize */
556 TRUE, /* pc_relative */
557 8, /* bitpos */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
565
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
567 0, /* rightshift */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
569 12, /* bitsize */
570 TRUE, /* pc_relative */
571 16, /* bitpos */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
579
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
581 0, /* rightshift */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
583 12, /* bitsize */
584 FALSE, /* pc_relative */
585 0, /* bitpos */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
593
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
595 0, /* rightshift */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
597 8, /* bitsize */
598 FALSE, /* pc_relative */
599 12, /* bitpos */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
607
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
609 0, /* rightshift */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
611 8, /* bitsize */
612 FALSE, /* pc_relative */
613 20, /* bitpos */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
621
622 HOWTO (R_ARM_TARGET1, /* type */
623 0, /* rightshift */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
625 32, /* bitsize */
626 FALSE, /* pc_relative */
627 0, /* bitpos */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
635
636 HOWTO (R_ARM_ROSEGREL32, /* type */
637 0, /* rightshift */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
639 32, /* bitsize */
640 FALSE, /* pc_relative */
641 0, /* bitpos */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
649
650 HOWTO (R_ARM_V4BX, /* type */
651 0, /* rightshift */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
653 32, /* bitsize */
654 FALSE, /* pc_relative */
655 0, /* bitpos */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
663
664 HOWTO (R_ARM_TARGET2, /* type */
665 0, /* rightshift */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
667 32, /* bitsize */
668 FALSE, /* pc_relative */
669 0, /* bitpos */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
677
678 HOWTO (R_ARM_PREL31, /* type */
679 0, /* rightshift */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
681 31, /* bitsize */
682 TRUE, /* pc_relative */
683 0, /* bitpos */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
691
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
693 0, /* rightshift */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
695 16, /* bitsize */
696 FALSE, /* pc_relative */
697 0, /* bitpos */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
705
706 HOWTO (R_ARM_MOVT_ABS, /* type */
707 0, /* rightshift */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
709 16, /* bitsize */
710 FALSE, /* pc_relative */
711 0, /* bitpos */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
719
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
721 0, /* rightshift */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
723 16, /* bitsize */
724 TRUE, /* pc_relative */
725 0, /* bitpos */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
733
734 HOWTO (R_ARM_MOVT_PREL, /* type */
735 0, /* rightshift */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
737 16, /* bitsize */
738 TRUE, /* pc_relative */
739 0, /* bitpos */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
747
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
749 0, /* rightshift */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
751 16, /* bitsize */
752 FALSE, /* pc_relative */
753 0, /* bitpos */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
761
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
763 0, /* rightshift */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
765 16, /* bitsize */
766 FALSE, /* pc_relative */
767 0, /* bitpos */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
775
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
777 0, /* rightshift */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
779 16, /* bitsize */
780 TRUE, /* pc_relative */
781 0, /* bitpos */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
789
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
791 0, /* rightshift */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
793 16, /* bitsize */
794 TRUE, /* pc_relative */
795 0, /* bitpos */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
803
804 HOWTO (R_ARM_THM_JUMP19, /* type */
805 1, /* rightshift */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
807 19, /* bitsize */
808 TRUE, /* pc_relative */
809 0, /* bitpos */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
817
818 HOWTO (R_ARM_THM_JUMP6, /* type */
819 1, /* rightshift */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
821 6, /* bitsize */
822 TRUE, /* pc_relative */
823 0, /* bitpos */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
831
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
834 versa. */
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
836 0, /* rightshift */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
838 13, /* bitsize */
839 TRUE, /* pc_relative */
840 0, /* bitpos */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
848
849 HOWTO (R_ARM_THM_PC12, /* type */
850 0, /* rightshift */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
852 13, /* bitsize */
853 TRUE, /* pc_relative */
854 0, /* bitpos */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
862
863 HOWTO (R_ARM_ABS32_NOI, /* type */
864 0, /* rightshift */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
866 32, /* bitsize */
867 FALSE, /* pc_relative */
868 0, /* bitpos */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
876
877 HOWTO (R_ARM_REL32_NOI, /* type */
878 0, /* rightshift */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
880 32, /* bitsize */
881 TRUE, /* pc_relative */
882 0, /* bitpos */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
890
891 /* Group relocations. */
892
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
894 0, /* rightshift */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
896 32, /* bitsize */
897 TRUE, /* pc_relative */
898 0, /* bitpos */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
906
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
908 0, /* rightshift */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
910 32, /* bitsize */
911 TRUE, /* pc_relative */
912 0, /* bitpos */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
920
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
922 0, /* rightshift */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
924 32, /* bitsize */
925 TRUE, /* pc_relative */
926 0, /* bitpos */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
934
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
936 0, /* rightshift */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
938 32, /* bitsize */
939 TRUE, /* pc_relative */
940 0, /* bitpos */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
948
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
950 0, /* rightshift */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
952 32, /* bitsize */
953 TRUE, /* pc_relative */
954 0, /* bitpos */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
962
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
964 0, /* rightshift */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
966 32, /* bitsize */
967 TRUE, /* pc_relative */
968 0, /* bitpos */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
976
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
978 0, /* rightshift */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
980 32, /* bitsize */
981 TRUE, /* pc_relative */
982 0, /* bitpos */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
990
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
992 0, /* rightshift */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
994 32, /* bitsize */
995 TRUE, /* pc_relative */
996 0, /* bitpos */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1004
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1006 0, /* rightshift */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1008 32, /* bitsize */
1009 TRUE, /* pc_relative */
1010 0, /* bitpos */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1018
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1020 0, /* rightshift */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1022 32, /* bitsize */
1023 TRUE, /* pc_relative */
1024 0, /* bitpos */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1032
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1034 0, /* rightshift */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 32, /* bitsize */
1037 TRUE, /* pc_relative */
1038 0, /* bitpos */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1046
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1048 0, /* rightshift */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 32, /* bitsize */
1051 TRUE, /* pc_relative */
1052 0, /* bitpos */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1060
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1062 0, /* rightshift */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1064 32, /* bitsize */
1065 TRUE, /* pc_relative */
1066 0, /* bitpos */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1074
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1076 0, /* rightshift */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1078 32, /* bitsize */
1079 TRUE, /* pc_relative */
1080 0, /* bitpos */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1088
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1090 0, /* rightshift */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1092 32, /* bitsize */
1093 TRUE, /* pc_relative */
1094 0, /* bitpos */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1102
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1104 0, /* rightshift */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1106 32, /* bitsize */
1107 TRUE, /* pc_relative */
1108 0, /* bitpos */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1116
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1118 0, /* rightshift */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1120 32, /* bitsize */
1121 TRUE, /* pc_relative */
1122 0, /* bitpos */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1130
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1132 0, /* rightshift */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1134 32, /* bitsize */
1135 TRUE, /* pc_relative */
1136 0, /* bitpos */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1144
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1146 0, /* rightshift */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1148 32, /* bitsize */
1149 TRUE, /* pc_relative */
1150 0, /* bitpos */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1158
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1160 0, /* rightshift */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1162 32, /* bitsize */
1163 TRUE, /* pc_relative */
1164 0, /* bitpos */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1172
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1174 0, /* rightshift */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1176 32, /* bitsize */
1177 TRUE, /* pc_relative */
1178 0, /* bitpos */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1186
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1188 0, /* rightshift */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1190 32, /* bitsize */
1191 TRUE, /* pc_relative */
1192 0, /* bitpos */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1200
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1202 0, /* rightshift */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1204 32, /* bitsize */
1205 TRUE, /* pc_relative */
1206 0, /* bitpos */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1214
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1216 0, /* rightshift */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 32, /* bitsize */
1219 TRUE, /* pc_relative */
1220 0, /* bitpos */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1228
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1230 0, /* rightshift */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1232 32, /* bitsize */
1233 TRUE, /* pc_relative */
1234 0, /* bitpos */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1242
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1244 0, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1246 32, /* bitsize */
1247 TRUE, /* pc_relative */
1248 0, /* bitpos */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1256
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1258 0, /* rightshift */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1260 32, /* bitsize */
1261 TRUE, /* pc_relative */
1262 0, /* bitpos */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1270
1271 /* End of group relocations. */
1272
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1274 0, /* rightshift */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1276 16, /* bitsize */
1277 FALSE, /* pc_relative */
1278 0, /* bitpos */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1286
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1288 0, /* rightshift */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1290 16, /* bitsize */
1291 FALSE, /* pc_relative */
1292 0, /* bitpos */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1300
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1302 0, /* rightshift */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1304 16, /* bitsize */
1305 FALSE, /* pc_relative */
1306 0, /* bitpos */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1314
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1316 0, /* rightshift */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1318 16, /* bitsize */
1319 FALSE, /* pc_relative */
1320 0, /* bitpos */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1328
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1330 0, /* rightshift */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1332 16, /* bitsize */
1333 FALSE, /* pc_relative */
1334 0, /* bitpos */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1342
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1344 0, /* rightshift */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1346 16, /* bitsize */
1347 FALSE, /* pc_relative */
1348 0, /* bitpos */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1356
1357 EMPTY_HOWTO (90), /* Unallocated. */
1358 EMPTY_HOWTO (91),
1359 EMPTY_HOWTO (92),
1360 EMPTY_HOWTO (93),
1361
1362 HOWTO (R_ARM_PLT32_ABS, /* type */
1363 0, /* rightshift */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 32, /* bitsize */
1366 FALSE, /* pc_relative */
1367 0, /* bitpos */
1368 complain_overflow_dont,/* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 "R_ARM_PLT32_ABS", /* name */
1371 FALSE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1375
1376 HOWTO (R_ARM_GOT_ABS, /* type */
1377 0, /* rightshift */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 32, /* bitsize */
1380 FALSE, /* pc_relative */
1381 0, /* bitpos */
1382 complain_overflow_dont,/* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 "R_ARM_GOT_ABS", /* name */
1385 FALSE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1389
1390 HOWTO (R_ARM_GOT_PREL, /* type */
1391 0, /* rightshift */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 32, /* bitsize */
1394 TRUE, /* pc_relative */
1395 0, /* bitpos */
1396 complain_overflow_dont, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 "R_ARM_GOT_PREL", /* name */
1399 FALSE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 TRUE), /* pcrel_offset */
1403
1404 HOWTO (R_ARM_GOT_BREL12, /* type */
1405 0, /* rightshift */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 12, /* bitsize */
1408 FALSE, /* pc_relative */
1409 0, /* bitpos */
1410 complain_overflow_bitfield,/* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 "R_ARM_GOT_BREL12", /* name */
1413 FALSE, /* partial_inplace */
1414 0x00000fff, /* src_mask */
1415 0x00000fff, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1417
1418 HOWTO (R_ARM_GOTOFF12, /* type */
1419 0, /* rightshift */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 12, /* bitsize */
1422 FALSE, /* pc_relative */
1423 0, /* bitpos */
1424 complain_overflow_bitfield,/* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
1426 "R_ARM_GOTOFF12", /* name */
1427 FALSE, /* partial_inplace */
1428 0x00000fff, /* src_mask */
1429 0x00000fff, /* dst_mask */
1430 FALSE), /* pcrel_offset */
1431
1432 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1433
1434 /* GNU extension to record C++ vtable member usage */
1435 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1436 0, /* rightshift */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1438 0, /* bitsize */
1439 FALSE, /* pc_relative */
1440 0, /* bitpos */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1443 "R_ARM_GNU_VTENTRY", /* name */
1444 FALSE, /* partial_inplace */
1445 0, /* src_mask */
1446 0, /* dst_mask */
1447 FALSE), /* pcrel_offset */
1448
1449 /* GNU extension to record C++ vtable hierarchy */
1450 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1451 0, /* rightshift */
1452 2, /* size (0 = byte, 1 = short, 2 = long) */
1453 0, /* bitsize */
1454 FALSE, /* pc_relative */
1455 0, /* bitpos */
1456 complain_overflow_dont, /* complain_on_overflow */
1457 NULL, /* special_function */
1458 "R_ARM_GNU_VTINHERIT", /* name */
1459 FALSE, /* partial_inplace */
1460 0, /* src_mask */
1461 0, /* dst_mask */
1462 FALSE), /* pcrel_offset */
1463
1464 HOWTO (R_ARM_THM_JUMP11, /* type */
1465 1, /* rightshift */
1466 1, /* size (0 = byte, 1 = short, 2 = long) */
1467 11, /* bitsize */
1468 TRUE, /* pc_relative */
1469 0, /* bitpos */
1470 complain_overflow_signed, /* complain_on_overflow */
1471 bfd_elf_generic_reloc, /* special_function */
1472 "R_ARM_THM_JUMP11", /* name */
1473 FALSE, /* partial_inplace */
1474 0x000007ff, /* src_mask */
1475 0x000007ff, /* dst_mask */
1476 TRUE), /* pcrel_offset */
1477
1478 HOWTO (R_ARM_THM_JUMP8, /* type */
1479 1, /* rightshift */
1480 1, /* size (0 = byte, 1 = short, 2 = long) */
1481 8, /* bitsize */
1482 TRUE, /* pc_relative */
1483 0, /* bitpos */
1484 complain_overflow_signed, /* complain_on_overflow */
1485 bfd_elf_generic_reloc, /* special_function */
1486 "R_ARM_THM_JUMP8", /* name */
1487 FALSE, /* partial_inplace */
1488 0x000000ff, /* src_mask */
1489 0x000000ff, /* dst_mask */
1490 TRUE), /* pcrel_offset */
1491
1492 /* TLS relocations */
1493 HOWTO (R_ARM_TLS_GD32, /* type */
1494 0, /* rightshift */
1495 2, /* size (0 = byte, 1 = short, 2 = long) */
1496 32, /* bitsize */
1497 FALSE, /* pc_relative */
1498 0, /* bitpos */
1499 complain_overflow_bitfield,/* complain_on_overflow */
1500 NULL, /* special_function */
1501 "R_ARM_TLS_GD32", /* name */
1502 TRUE, /* partial_inplace */
1503 0xffffffff, /* src_mask */
1504 0xffffffff, /* dst_mask */
1505 FALSE), /* pcrel_offset */
1506
1507 HOWTO (R_ARM_TLS_LDM32, /* type */
1508 0, /* rightshift */
1509 2, /* size (0 = byte, 1 = short, 2 = long) */
1510 32, /* bitsize */
1511 FALSE, /* pc_relative */
1512 0, /* bitpos */
1513 complain_overflow_bitfield,/* complain_on_overflow */
1514 bfd_elf_generic_reloc, /* special_function */
1515 "R_ARM_TLS_LDM32", /* name */
1516 TRUE, /* partial_inplace */
1517 0xffffffff, /* src_mask */
1518 0xffffffff, /* dst_mask */
1519 FALSE), /* pcrel_offset */
1520
1521 HOWTO (R_ARM_TLS_LDO32, /* type */
1522 0, /* rightshift */
1523 2, /* size (0 = byte, 1 = short, 2 = long) */
1524 32, /* bitsize */
1525 FALSE, /* pc_relative */
1526 0, /* bitpos */
1527 complain_overflow_bitfield,/* complain_on_overflow */
1528 bfd_elf_generic_reloc, /* special_function */
1529 "R_ARM_TLS_LDO32", /* name */
1530 TRUE, /* partial_inplace */
1531 0xffffffff, /* src_mask */
1532 0xffffffff, /* dst_mask */
1533 FALSE), /* pcrel_offset */
1534
1535 HOWTO (R_ARM_TLS_IE32, /* type */
1536 0, /* rightshift */
1537 2, /* size (0 = byte, 1 = short, 2 = long) */
1538 32, /* bitsize */
1539 FALSE, /* pc_relative */
1540 0, /* bitpos */
1541 complain_overflow_bitfield,/* complain_on_overflow */
1542 NULL, /* special_function */
1543 "R_ARM_TLS_IE32", /* name */
1544 TRUE, /* partial_inplace */
1545 0xffffffff, /* src_mask */
1546 0xffffffff, /* dst_mask */
1547 FALSE), /* pcrel_offset */
1548
1549 HOWTO (R_ARM_TLS_LE32, /* type */
1550 0, /* rightshift */
1551 2, /* size (0 = byte, 1 = short, 2 = long) */
1552 32, /* bitsize */
1553 FALSE, /* pc_relative */
1554 0, /* bitpos */
1555 complain_overflow_bitfield,/* complain_on_overflow */
1556 bfd_elf_generic_reloc, /* special_function */
1557 "R_ARM_TLS_LE32", /* name */
1558 TRUE, /* partial_inplace */
1559 0xffffffff, /* src_mask */
1560 0xffffffff, /* dst_mask */
1561 FALSE), /* pcrel_offset */
1562
1563 HOWTO (R_ARM_TLS_LDO12, /* type */
1564 0, /* rightshift */
1565 2, /* size (0 = byte, 1 = short, 2 = long) */
1566 12, /* bitsize */
1567 FALSE, /* pc_relative */
1568 0, /* bitpos */
1569 complain_overflow_bitfield,/* complain_on_overflow */
1570 bfd_elf_generic_reloc, /* special_function */
1571 "R_ARM_TLS_LDO12", /* name */
1572 FALSE, /* partial_inplace */
1573 0x00000fff, /* src_mask */
1574 0x00000fff, /* dst_mask */
1575 FALSE), /* pcrel_offset */
1576
1577 HOWTO (R_ARM_TLS_LE12, /* type */
1578 0, /* rightshift */
1579 2, /* size (0 = byte, 1 = short, 2 = long) */
1580 12, /* bitsize */
1581 FALSE, /* pc_relative */
1582 0, /* bitpos */
1583 complain_overflow_bitfield,/* complain_on_overflow */
1584 bfd_elf_generic_reloc, /* special_function */
1585 "R_ARM_TLS_LE12", /* name */
1586 FALSE, /* partial_inplace */
1587 0x00000fff, /* src_mask */
1588 0x00000fff, /* dst_mask */
1589 FALSE), /* pcrel_offset */
1590
1591 HOWTO (R_ARM_TLS_IE12GP, /* type */
1592 0, /* rightshift */
1593 2, /* size (0 = byte, 1 = short, 2 = long) */
1594 12, /* bitsize */
1595 FALSE, /* pc_relative */
1596 0, /* bitpos */
1597 complain_overflow_bitfield,/* complain_on_overflow */
1598 bfd_elf_generic_reloc, /* special_function */
1599 "R_ARM_TLS_IE12GP", /* name */
1600 FALSE, /* partial_inplace */
1601 0x00000fff, /* src_mask */
1602 0x00000fff, /* dst_mask */
1603 FALSE), /* pcrel_offset */
1604 };
1605
1606 /* 112-127 private relocations
1607 128 R_ARM_ME_TOO, obsolete
1608 129-255 unallocated in AAELF.
1609
1610 249-255 extended, currently unused, relocations: */
1611
1612 static reloc_howto_type elf32_arm_howto_table_2[4] =
1613 {
1614 HOWTO (R_ARM_RREL32, /* type */
1615 0, /* rightshift */
1616 0, /* size (0 = byte, 1 = short, 2 = long) */
1617 0, /* bitsize */
1618 FALSE, /* pc_relative */
1619 0, /* bitpos */
1620 complain_overflow_dont,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_RREL32", /* name */
1623 FALSE, /* partial_inplace */
1624 0, /* src_mask */
1625 0, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1627
1628 HOWTO (R_ARM_RABS32, /* type */
1629 0, /* rightshift */
1630 0, /* size (0 = byte, 1 = short, 2 = long) */
1631 0, /* bitsize */
1632 FALSE, /* pc_relative */
1633 0, /* bitpos */
1634 complain_overflow_dont,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_RABS32", /* name */
1637 FALSE, /* partial_inplace */
1638 0, /* src_mask */
1639 0, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1641
1642 HOWTO (R_ARM_RPC24, /* type */
1643 0, /* rightshift */
1644 0, /* size (0 = byte, 1 = short, 2 = long) */
1645 0, /* bitsize */
1646 FALSE, /* pc_relative */
1647 0, /* bitpos */
1648 complain_overflow_dont,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_RPC24", /* name */
1651 FALSE, /* partial_inplace */
1652 0, /* src_mask */
1653 0, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1655
1656 HOWTO (R_ARM_RBASE, /* type */
1657 0, /* rightshift */
1658 0, /* size (0 = byte, 1 = short, 2 = long) */
1659 0, /* bitsize */
1660 FALSE, /* pc_relative */
1661 0, /* bitpos */
1662 complain_overflow_dont,/* complain_on_overflow */
1663 bfd_elf_generic_reloc, /* special_function */
1664 "R_ARM_RBASE", /* name */
1665 FALSE, /* partial_inplace */
1666 0, /* src_mask */
1667 0, /* dst_mask */
1668 FALSE) /* pcrel_offset */
1669 };
1670
1671 static reloc_howto_type *
1672 elf32_arm_howto_from_type (unsigned int r_type)
1673 {
1674 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1675 return &elf32_arm_howto_table_1[r_type];
1676
1677 if (r_type >= R_ARM_RREL32
1678 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1679 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1680
1681 return NULL;
1682 }
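/* Illustrative sketch only (kept out of the build with #if 0): the
   tables above are indexed directly by relocation number, so the HOWTO
   returned for a type carries that same type in its TYPE field;
   numbers in the R_ARM_RREL32..R_ARM_RBASE range come from the second
   table.  */
#if 0
static void
example_howto_from_type (void)
{
  reloc_howto_type *howto = elf32_arm_howto_from_type (R_ARM_ABS32);

  if (howto != NULL)
    BFD_ASSERT (howto->type == R_ARM_ABS32);
}
#endif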
1683
1684 static void
1685 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1686 Elf_Internal_Rela * elf_reloc)
1687 {
1688 unsigned int r_type;
1689
1690 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1691 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1692 }
1693
1694 struct elf32_arm_reloc_map
1695 {
1696 bfd_reloc_code_real_type bfd_reloc_val;
1697 unsigned char elf_reloc_val;
1698 };
1699
1700 /* All entries in this list must also be present in elf32_arm_howto_table_1. */
1701 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1702 {
1703 {BFD_RELOC_NONE, R_ARM_NONE},
1704 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1705 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1706 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1707 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1708 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1709 {BFD_RELOC_32, R_ARM_ABS32},
1710 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1711 {BFD_RELOC_8, R_ARM_ABS8},
1712 {BFD_RELOC_16, R_ARM_ABS16},
1713 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1714 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1719 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1720 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1721 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1722 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1723 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1724 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1725 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1726 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1727 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1728 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1729 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1730 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1731 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1732 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1734 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1735 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1736 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1737 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1738 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1739 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1740 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1741 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1742 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1743 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1744 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1745 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1746 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1747 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1748 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1750 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1751 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1752 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1754 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1755 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1756 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1757 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1758 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1759 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1760 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1761 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1762 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1763 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1764 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1765 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1766 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1768 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1769 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1770 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1771 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1772 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1773 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1774 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1775 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1776 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1777 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1778 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1779 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1780 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1781 };
1782
1783 static reloc_howto_type *
1784 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1785 bfd_reloc_code_real_type code)
1786 {
1787 unsigned int i;
1788
1789 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1790 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1791 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1792
1793 return NULL;
1794 }
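
/* For example, a lookup of BFD_RELOC_32 matches the
   {BFD_RELOC_32, R_ARM_ABS32} entry in the map above and therefore
   returns elf32_arm_howto_from_type (R_ARM_ABS32); a code with no
   entry in the map returns NULL.  */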
1795
1796 static reloc_howto_type *
1797 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1798 const char *r_name)
1799 {
1800 unsigned int i;
1801
1802 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1803 if (elf32_arm_howto_table_1[i].name != NULL
1804 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1805 return &elf32_arm_howto_table_1[i];
1806
1807 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1808 if (elf32_arm_howto_table_2[i].name != NULL
1809 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1810 return &elf32_arm_howto_table_2[i];
1811
1812 return NULL;
1813 }
1814
1815 /* Support for core dump NOTE sections. */
1816
1817 static bfd_boolean
1818 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1819 {
1820 int offset;
1821 size_t size;
1822
1823 switch (note->descsz)
1824 {
1825 default:
1826 return FALSE;
1827
1828 case 148: /* Linux/ARM 32-bit. */
1829 /* pr_cursig */
1830 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831
1832 /* pr_pid */
1833 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1834
1835 /* pr_reg */
1836 offset = 72;
1837 size = 72;
1838
1839 break;
1840 }
1841
1842 /* Make a ".reg/999" section. */
1843 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1844 size, note->descpos + offset);
1845 }
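
/* For reference, the 148-byte note accepted above is the Linux/ARM
   elf_prstatus layout: pr_reg starts 72 bytes into the descriptor and
   is itself 72 bytes, which presumably corresponds to 18 32-bit values
   (r0-r15, cpsr and the original r0).  */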
1846
1847 static bfd_boolean
1848 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1849 {
1850 switch (note->descsz)
1851 {
1852 default:
1853 return FALSE;
1854
1855 case 124: /* Linux/ARM elf_prpsinfo. */
1856 elf_tdata (abfd)->core_program
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1858 elf_tdata (abfd)->core_command
1859 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 }
1861
1862   /* Note that for some reason a spurious space is tacked onto
1863      the end of the args in some implementations (at least one,
1864      anyway), so strip it off if it exists.  */
1865 {
1866 char *command = elf_tdata (abfd)->core_command;
1867 int n = strlen (command);
1868
1869 if (0 < n && command[n - 1] == ' ')
1870 command[n - 1] = '\0';
1871 }
1872
1873 return TRUE;
1874 }
1875
1876 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1877 #define TARGET_LITTLE_NAME "elf32-littlearm"
1878 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1879 #define TARGET_BIG_NAME "elf32-bigarm"
1880
1881 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1882 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1883
1884 typedef unsigned long int insn32;
1885 typedef unsigned short int insn16;
1886
1887 /* In lieu of proper flags, assume all EABIv4 or later objects are
1888 interworkable. */
1889 #define INTERWORK_FLAG(abfd) \
1890 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1891 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1892 || ((abfd)->flags & BFD_LINKER_CREATED))
1893
1894 /* The linker script knows the section names for placement.
1895 The entry_names are used to do simple name mangling on the stubs.
1896 Given a function name, and its type, the stub can be found. The
1897    name can be changed.  The only requirement is that the %s be present.  */
1898 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1899 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1900
1901 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1902 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1903
1904 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1905 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1906
1907 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1908 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1909
1910 #define STUB_ENTRY_NAME "__%s_veneer"
1911
1912 /* The name of the dynamic interpreter. This is put in the .interp
1913 section. */
1914 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1915
1916 #ifdef FOUR_WORD_PLT
1917
1918 /* The first entry in a procedure linkage table looks like
1919 this. It is set up so that any shared library function that is
1920 called before the relocation has been set up calls the dynamic
1921 linker first. */
1922 static const bfd_vma elf32_arm_plt0_entry [] =
1923 {
1924 0xe52de004, /* str lr, [sp, #-4]! */
1925 0xe59fe010, /* ldr lr, [pc, #16] */
1926 0xe08fe00e, /* add lr, pc, lr */
1927 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 };
1929
1930 /* Subsequent entries in a procedure linkage table look like
1931 this. */
1932 static const bfd_vma elf32_arm_plt_entry [] =
1933 {
1934 0xe28fc600, /* add ip, pc, #NN */
1935 0xe28cca00, /* add ip, ip, #NN */
1936 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1937 0x00000000, /* unused */
1938 };
1939
1940 #else
1941
1942 /* The first entry in a procedure linkage table looks like
1943 this. It is set up so that any shared library function that is
1944 called before the relocation has been set up calls the dynamic
1945 linker first. */
1946 static const bfd_vma elf32_arm_plt0_entry [] =
1947 {
1948 0xe52de004, /* str lr, [sp, #-4]! */
1949 0xe59fe004, /* ldr lr, [pc, #4] */
1950 0xe08fe00e, /* add lr, pc, lr */
1951 0xe5bef008, /* ldr pc, [lr, #8]! */
1952 0x00000000, /* &GOT[0] - . */
1953 };
1954
1955 /* Subsequent entries in a procedure linkage table look like
1956 this. */
1957 static const bfd_vma elf32_arm_plt_entry [] =
1958 {
1959 0xe28fc600, /* add ip, pc, #0xNN00000 */
1960 0xe28cca00, /* add ip, ip, #0xNN000 */
1961 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
1962 };
1963
1964 #endif
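
/* Illustrative example (hypothetical offset): the three-instruction PLT
   entry above splits the pc-relative displacement to the GOT slot into
   an 8-8-12 bit immediate chain, so a displacement of 0x123456 would be
   encoded roughly as
       add ip, pc, #0x100000
       add ip, ip, #0x23000
       ldr pc, [ip, #0x456]!
   with the final writeback leaving ip pointing at the GOT slot.  */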
1965
1966 /* The format of the first entry in the procedure linkage table
1967 for a VxWorks executable. */
1968 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1969 {
1970 0xe52dc008, /* str ip,[sp,#-8]! */
1971 0xe59fc000, /* ldr ip,[pc] */
1972 0xe59cf008, /* ldr pc,[ip,#8] */
1973 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 };
1975
1976 /* The format of subsequent entries in a VxWorks executable. */
1977 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1978 {
1979 0xe59fc000, /* ldr ip,[pc] */
1980 0xe59cf000, /* ldr pc,[ip] */
1981 0x00000000, /* .long @got */
1982 0xe59fc000, /* ldr ip,[pc] */
1983 0xea000000, /* b _PLT */
1984 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 };
1986
1987 /* The format of entries in a VxWorks shared library. */
1988 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1989 {
1990 0xe59fc000, /* ldr ip,[pc] */
1991 0xe79cf009, /* ldr pc,[ip,r9] */
1992 0x00000000, /* .long @got */
1993 0xe59fc000, /* ldr ip,[pc] */
1994 0xe599f008, /* ldr pc,[r9,#8] */
1995 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1996 };
1997
1998 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1999 #define PLT_THUMB_STUB_SIZE 4
2000 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2001 {
2002 0x4778, /* bx pc */
2003 0x46c0 /* nop */
2004 };
2005
2006 /* The entries in a PLT when using a DLL-based target with multiple
2007 address spaces. */
2008 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2009 {
2010 0xe51ff004, /* ldr pc, [pc, #-4] */
2011 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 };
2013
2014 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2015 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2016 #define THM_MAX_FWD_BRANCH_OFFSET  (((1 << 22) - 2) + 4)
2017 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2018 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2019 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2020
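/* For reference, the limits above evaluate to (illustrative arithmetic):
   ARM_MAX_FWD_BRANCH_OFFSET  = 0x02000004  (the +/-32MB b/bl range, with
                                             the pipeline's pc+8 folded in)
   THM_MAX_FWD_BRANCH_OFFSET  = 0x00400002  (Thumb-1 bl)
   THM2_MAX_FWD_BRANCH_OFFSET = 0x01000002  (Thumb-2 b.w/bl)
   and similarly for the backward limits.  */
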
2021 enum stub_insn_type
2022 {
2023 THUMB16_TYPE = 1,
2024 THUMB32_TYPE,
2025 ARM_TYPE,
2026 DATA_TYPE
2027 };
2028
2029 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2030 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2031 is inserted in arm_build_one_stub(). */
2032 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2033 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2034 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2035 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2036 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2037 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2038
2039 typedef struct
2040 {
2041 bfd_vma data;
2042 enum stub_insn_type type;
2043 unsigned int r_type;
2044 int reloc_addend;
2045 } insn_sequence;
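
/* For example, ARM_INSN (0xe51ff004) expands to
   {0xe51ff004, ARM_TYPE, R_ARM_NONE, 0} and DATA_WORD (0, R_ARM_ABS32, 0)
   to {0, DATA_TYPE, R_ARM_ABS32, 0}: each template entry carries the raw
   encoding (or literal word), its kind, an optional relocation type and
   an addend.  */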
2046
2047 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2048 to reach the stub if necessary. */
2049 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2050 {
2051 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2052 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 };
2054
2055 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2056 available. */
2057 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2058 {
2059 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2060 ARM_INSN(0xe12fff1c), /* bx ip */
2061 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 };
2063
2064 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2065 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2066 {
2067 THUMB16_INSN(0xb401), /* push {r0} */
2068 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2069 THUMB16_INSN(0x4684), /* mov ip, r0 */
2070 THUMB16_INSN(0xbc01), /* pop {r0} */
2071 THUMB16_INSN(0x4760), /* bx ip */
2072 THUMB16_INSN(0xbf00), /* nop */
2073 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 };
2075
2076 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2077 allowed. */
2078 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2079 {
2080 THUMB16_INSN(0x4778), /* bx pc */
2081 THUMB16_INSN(0x46c0), /* nop */
2082 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2083 ARM_INSN(0xe12fff1c), /* bx ip */
2084 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 };
2086
2087 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2088 available. */
2089 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2090 {
2091 THUMB16_INSN(0x4778), /* bx pc */
2092 THUMB16_INSN(0x46c0), /* nop */
2093 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2094 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 };
2096
2097 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2098 one, when the destination is close enough. */
2099 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2100 {
2101 THUMB16_INSN(0x4778), /* bx pc */
2102 THUMB16_INSN(0x46c0), /* nop */
2103 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 };
2105
2106 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2107 blx to reach the stub if necessary. */
2108 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2109 {
2110 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2111 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2112 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 };
2114
2115 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2116    blx to reach the stub if necessary.  We cannot add into pc;
2117    it is not guaranteed to switch mode (the behaviour differs
2118    between ARMv6 and ARMv7).  */
2119 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2120 {
2121 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2122 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2123 ARM_INSN(0xe12fff1c), /* bx ip */
2124 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 };
2126
2127 /* V4T ARM -> Thumb long branch stub, PIC.  */
2128 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2129 {
2130 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2131 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2132 ARM_INSN(0xe12fff1c), /* bx ip */
2133 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 };
2135
2136 /* V4T Thumb -> ARM long branch stub, PIC. */
2137 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2138 {
2139 THUMB16_INSN(0x4778), /* bx pc */
2140 THUMB16_INSN(0x46c0), /* nop */
2141 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2142 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2143     DATA_WORD(0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X-4) */
2144 };
2145
2146 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2147 architectures. */
2148 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2149 {
2150 THUMB16_INSN(0xb401), /* push {r0} */
2151 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2152 THUMB16_INSN(0x46fc), /* mov ip, pc */
2153 THUMB16_INSN(0x4484), /* add ip, r0 */
2154 THUMB16_INSN(0xbc01), /* pop {r0} */
2155 THUMB16_INSN(0x4760), /* bx ip */
2156 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 };
2158
2159 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2160 allowed. */
2161 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2162 {
2163 THUMB16_INSN(0x4778), /* bx pc */
2164 THUMB16_INSN(0x46c0), /* nop */
2165 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2166 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2167 ARM_INSN(0xe12fff1c), /* bx ip */
2168 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 };
2170
2171 /* Cortex-A8 erratum-workaround stubs. */
2172
2173 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2174 can't use a conditional branch to reach this stub). */
2175
2176 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2177 {
2178 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2179 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2180 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 };
2182
2183 /* Stub used for b.w and bl.w instructions. */
2184
2185 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2186 {
2187 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 };
2189
2190 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2191 {
2192 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 };
2194
2195 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2196 instruction (which switches to ARM mode) to point to this stub. Jump to the
2197 real destination using an ARM-mode branch. */
2198
2199 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2200 {
2201 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 };
2203
2204 /* Section name for stubs is the associated section name plus this
2205 string. */
2206 #define STUB_SUFFIX ".stub"
2207
2208 /* One entry per long/short branch stub defined above. */
2209 #define DEF_STUBS \
2210 DEF_STUB(long_branch_any_any) \
2211 DEF_STUB(long_branch_v4t_arm_thumb) \
2212 DEF_STUB(long_branch_thumb_only) \
2213 DEF_STUB(long_branch_v4t_thumb_thumb) \
2214 DEF_STUB(long_branch_v4t_thumb_arm) \
2215 DEF_STUB(short_branch_v4t_thumb_arm) \
2216 DEF_STUB(long_branch_any_arm_pic) \
2217 DEF_STUB(long_branch_any_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2219 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2220 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2221 DEF_STUB(long_branch_thumb_only_pic) \
2222 DEF_STUB(a8_veneer_b_cond) \
2223 DEF_STUB(a8_veneer_b) \
2224 DEF_STUB(a8_veneer_bl) \
2225 DEF_STUB(a8_veneer_blx)
2226
2227 #define DEF_STUB(x) arm_stub_##x,
2228 enum elf32_arm_stub_type {
2229 arm_stub_none,
2230 DEF_STUBS
2231 };
2232 #undef DEF_STUB
2233
2234 typedef struct
2235 {
2236 const insn_sequence* template;
2237 int template_size;
2238 } stub_def;
2239
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2242 {NULL, 0},
2243 DEF_STUBS
2244 };
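
/* The DEF_STUBS X-macro expands each DEF_STUB(x) twice: once as the
   enumerator arm_stub_##x above and once as
   {elf32_arm_stub_##x, ARRAY_SIZE (elf32_arm_stub_##x)} here, so, for
   example, stub_definitions[arm_stub_long_branch_any_any] describes
   elf32_arm_stub_long_branch_any_any.  The leading {NULL, 0} slot pairs
   with arm_stub_none.  */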
2245
2246 struct elf32_arm_stub_hash_entry
2247 {
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2250
2251 /* The stub section. */
2252 asection *stub_sec;
2253
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2256
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2261
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2264
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2268
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2272 int stub_size;
2273 /* Its template. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2277
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2280
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2283
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2286 asection *id_sec;
2287
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2291 char *output_name;
2292 };
2293
2294 /* Used to build a map of a section. This is required for mixed-endian
2295 code/data. */
2296
2297 typedef struct elf32_elf_section_map
2298 {
2299 bfd_vma vma;
2300 char type;
2301 }
2302 elf32_arm_section_map;
2303
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2305
2306 typedef enum
2307 {
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2312 }
2313 elf32_vfp11_erratum_type;
2314
2315 typedef struct elf32_vfp11_erratum_list
2316 {
2317 struct elf32_vfp11_erratum_list *next;
2318 bfd_vma vma;
2319 union
2320 {
2321 struct
2322 {
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2325 } b;
2326 struct
2327 {
2328 struct elf32_vfp11_erratum_list *branch;
2329 unsigned int id;
2330 } v;
2331 } u;
2332 elf32_vfp11_erratum_type type;
2333 }
2334 elf32_vfp11_erratum_list;
2335
2336 typedef enum
2337 {
2338 DELETE_EXIDX_ENTRY,
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2340 }
2341 arm_unwind_edit_type;
2342
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2345 {
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2351 unsigned int index;
2352 struct arm_unwind_table_edit *next;
2353 }
2354 arm_unwind_table_edit;
2355
2356 typedef struct _arm_elf_section_data
2357 {
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2367 union
2368 {
2369 /* Unwind info attached to a text section. */
2370 struct
2371 {
2372 asection *arm_exidx_sec;
2373 } text;
2374
2375 /* Unwind info attached to an .ARM.exidx section. */
2376 struct
2377 {
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2380 } exidx;
2381 } u;
2382 }
2383 _arm_elf_section_data;
2384
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2387
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390    so may be created multiple times: while relaxing we keep an array of these
2391    entries, which is easy to refresh, and then create stubs for each potentially
2392    erratum-triggering instruction once we have settled on a solution.  */
2393
2394 struct a8_erratum_fix {
2395 bfd *input_bfd;
2396 asection *section;
2397 bfd_vma offset;
2398 bfd_vma addend;
2399 unsigned long orig_insn;
2400 char *stub_name;
2401 enum elf32_arm_stub_type stub_type;
2402 };
2403
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2405 erratum. */
2406
2407 struct a8_erratum_reloc {
2408 bfd_vma from;
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2414 };
2415
2416 /* The size of the thread control block. */
2417 #define TCB_SIZE 8
2418
2419 struct elf_arm_obj_tdata
2420 {
2421 struct elf_obj_tdata root;
2422
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2425
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2428
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2431 };
2432
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2435
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2438
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_TDATA)
2443
2444 static bfd_boolean
2445 elf32_arm_mkobject (bfd *abfd)
2446 {
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2448 ARM_ELF_TDATA);
2449 }
2450
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2456
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2460 {
2461 /* Next section. */
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2464 asection * section;
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2469 };
2470
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2472
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2475 {
2476 struct elf_link_hash_entry root;
2477
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2480
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2484
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2488
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2493
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2499
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2503
2504 /* A pointer to the most recently used stub hash entry against this
2505 symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2507 };
2508
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2512 (&(table)->root, \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2514 (info)))
2515
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 ((struct elf32_arm_link_hash_table *) ((info)->hash))
2519
2520 #define arm_stub_hash_lookup(table, string, create, copy) \
2521 ((struct elf32_arm_stub_hash_entry *) \
2522 bfd_hash_lookup ((table), (string), (create), (copy)))
2523
2524 /* ARM ELF linker hash table. */
2525 struct elf32_arm_link_hash_table
2526 {
2527 /* The main hash table. */
2528 struct elf_link_hash_table root;
2529
2530 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2531 bfd_size_type thumb_glue_size;
2532
2533 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2534 bfd_size_type arm_glue_size;
2535
2536 /* The size in bytes of section containing the ARMv4 BX veneers. */
2537 bfd_size_type bx_glue_size;
2538
2539   /* Offsets of ARMv4 BX veneers.  Bit 1 is set if the veneer is present,
2540      and bit 0 is set once it has been populated.  */
2541 bfd_vma bx_glue_offset[15];
2542
2543 /* The size in bytes of the section containing glue for VFP11 erratum
2544 veneers. */
2545 bfd_size_type vfp11_erratum_glue_size;
2546
2547 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2548 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2549 elf32_arm_write_section(). */
2550 struct a8_erratum_fix *a8_erratum_fixes;
2551 unsigned int num_a8_erratum_fixes;
2552
2553 /* An arbitrary input BFD chosen to hold the glue sections. */
2554 bfd * bfd_of_glue_owner;
2555
2556 /* Nonzero to output a BE8 image. */
2557 int byteswap_code;
2558
2559 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2560 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2561 int target1_is_rel;
2562
2563 /* The relocation to use for R_ARM_TARGET2 relocations. */
2564 int target2_reloc;
2565
2566 /* 0 = Ignore R_ARM_V4BX.
2567 1 = Convert BX to MOV PC.
2568      2 = Generate v4 interworking stubs.  */
2569 int fix_v4bx;
2570
2571 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2572 int fix_cortex_a8;
2573
2574 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2575 int use_blx;
2576
2577 /* What sort of code sequences we should look for which may trigger the
2578 VFP11 denorm erratum. */
2579 bfd_arm_vfp11_fix vfp11_fix;
2580
2581 /* Global counter for the number of fixes we have emitted. */
2582 int num_vfp11_fixes;
2583
2584 /* Nonzero to force PIC branch veneers. */
2585 int pic_veneer;
2586
2587 /* The number of bytes in the initial entry in the PLT. */
2588 bfd_size_type plt_header_size;
2589
2590   /* The number of bytes in the subsequent PLT entries.  */
2591 bfd_size_type plt_entry_size;
2592
2593 /* True if the target system is VxWorks. */
2594 int vxworks_p;
2595
2596 /* True if the target system is Symbian OS. */
2597 int symbian_p;
2598
2599 /* True if the target uses REL relocations. */
2600 int use_rel;
2601
2602 /* Short-cuts to get to dynamic linker sections. */
2603 asection *sgot;
2604 asection *sgotplt;
2605 asection *srelgot;
2606 asection *splt;
2607 asection *srelplt;
2608 asection *sdynbss;
2609 asection *srelbss;
2610
2611 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2612 asection *srelplt2;
2613
2614 /* Data for R_ARM_TLS_LDM32 relocations. */
2615 union
2616 {
2617 bfd_signed_vma refcount;
2618 bfd_vma offset;
2619 } tls_ldm_got;
2620
2621 /* Small local sym to section mapping cache. */
2622 struct sym_sec_cache sym_sec;
2623
2624 /* For convenience in allocate_dynrelocs. */
2625 bfd * obfd;
2626
2627 /* The stub hash table. */
2628 struct bfd_hash_table stub_hash_table;
2629
2630 /* Linker stub bfd. */
2631 bfd *stub_bfd;
2632
2633 /* Linker call-backs. */
2634 asection * (*add_stub_section) (const char *, asection *);
2635 void (*layout_sections_again) (void);
2636
2637 /* Array to keep track of which stub sections have been created, and
2638 information on stub grouping. */
2639 struct map_stub
2640 {
2641 /* This is the section to which stubs in the group will be
2642 attached. */
2643 asection *link_sec;
2644 /* The stub section. */
2645 asection *stub_sec;
2646 } *stub_group;
2647
2648 /* Assorted information used by elf32_arm_size_stubs. */
2649 unsigned int bfd_count;
2650 int top_index;
2651 asection **input_list;
2652 };
2653
2654 /* Create an entry in an ARM ELF linker hash table. */
2655
2656 static struct bfd_hash_entry *
2657 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2658 struct bfd_hash_table * table,
2659 const char * string)
2660 {
2661 struct elf32_arm_link_hash_entry * ret =
2662 (struct elf32_arm_link_hash_entry *) entry;
2663
2664 /* Allocate the structure if it has not already been allocated by a
2665 subclass. */
2666 if (ret == NULL)
2667 ret = bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2668 if (ret == NULL)
2669 return (struct bfd_hash_entry *) ret;
2670
2671 /* Call the allocation method of the superclass. */
2672 ret = ((struct elf32_arm_link_hash_entry *)
2673 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2674 table, string));
2675 if (ret != NULL)
2676 {
2677 ret->relocs_copied = NULL;
2678 ret->tls_type = GOT_UNKNOWN;
2679 ret->plt_thumb_refcount = 0;
2680 ret->plt_maybe_thumb_refcount = 0;
2681 ret->plt_got_offset = -1;
2682 ret->export_glue = NULL;
2683
2684 ret->stub_cache = NULL;
2685 }
2686
2687 return (struct bfd_hash_entry *) ret;
2688 }
2689
2690 /* Initialize an entry in the stub hash table. */
2691
2692 static struct bfd_hash_entry *
2693 stub_hash_newfunc (struct bfd_hash_entry *entry,
2694 struct bfd_hash_table *table,
2695 const char *string)
2696 {
2697 /* Allocate the structure if it has not already been allocated by a
2698 subclass. */
2699 if (entry == NULL)
2700 {
2701 entry = bfd_hash_allocate (table,
2702 sizeof (struct elf32_arm_stub_hash_entry));
2703 if (entry == NULL)
2704 return entry;
2705 }
2706
2707 /* Call the allocation method of the superclass. */
2708 entry = bfd_hash_newfunc (entry, table, string);
2709 if (entry != NULL)
2710 {
2711 struct elf32_arm_stub_hash_entry *eh;
2712
2713 /* Initialize the local fields. */
2714 eh = (struct elf32_arm_stub_hash_entry *) entry;
2715 eh->stub_sec = NULL;
2716 eh->stub_offset = 0;
2717 eh->target_value = 0;
2718 eh->target_section = NULL;
2719 eh->stub_type = arm_stub_none;
2720 eh->stub_size = 0;
2721 eh->stub_template = NULL;
2722 eh->stub_template_size = 0;
2723 eh->h = NULL;
2724 eh->id_sec = NULL;
2725 }
2726
2727 return entry;
2728 }
2729
2730 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2731 shortcuts to them in our hash table. */
2732
2733 static bfd_boolean
2734 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2735 {
2736 struct elf32_arm_link_hash_table *htab;
2737
2738 htab = elf32_arm_hash_table (info);
2739 /* BPABI objects never have a GOT, or associated sections. */
2740 if (htab->symbian_p)
2741 return TRUE;
2742
2743 if (! _bfd_elf_create_got_section (dynobj, info))
2744 return FALSE;
2745
2746 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2747 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2748 if (!htab->sgot || !htab->sgotplt)
2749 abort ();
2750
2751 htab->srelgot = bfd_make_section_with_flags (dynobj,
2752 RELOC_SECTION (htab, ".got"),
2753 (SEC_ALLOC | SEC_LOAD
2754 | SEC_HAS_CONTENTS
2755 | SEC_IN_MEMORY
2756 | SEC_LINKER_CREATED
2757 | SEC_READONLY));
2758 if (htab->srelgot == NULL
2759 || ! bfd_set_section_alignment (dynobj, htab->srelgot, 2))
2760 return FALSE;
2761 return TRUE;
2762 }
2763
2764 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2765 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2766 hash table. */
2767
2768 static bfd_boolean
2769 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2770 {
2771 struct elf32_arm_link_hash_table *htab;
2772
2773 htab = elf32_arm_hash_table (info);
2774 if (!htab->sgot && !create_got_section (dynobj, info))
2775 return FALSE;
2776
2777 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2778 return FALSE;
2779
2780 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2781 htab->srelplt = bfd_get_section_by_name (dynobj,
2782 RELOC_SECTION (htab, ".plt"));
2783 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2784 if (!info->shared)
2785 htab->srelbss = bfd_get_section_by_name (dynobj,
2786 RELOC_SECTION (htab, ".bss"));
2787
2788 if (htab->vxworks_p)
2789 {
2790 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2791 return FALSE;
2792
2793 if (info->shared)
2794 {
2795 htab->plt_header_size = 0;
2796 htab->plt_entry_size
2797 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2798 }
2799 else
2800 {
2801 htab->plt_header_size
2802 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2803 htab->plt_entry_size
2804 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2805 }
2806 }
2807
2808 if (!htab->splt
2809 || !htab->srelplt
2810 || !htab->sdynbss
2811 || (!info->shared && !htab->srelbss))
2812 abort ();
2813
2814 return TRUE;
2815 }
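
/* For the VxWorks case the sizes chosen above work out to, e.g.,
   plt_header_size = 4 * 4 = 16 bytes for an executable
   (elf32_arm_vxworks_exec_plt0_entry has four words) and
   plt_entry_size = 4 * 6 = 24 bytes
   (elf32_arm_vxworks_exec_plt_entry has six).  */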
2816
2817 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2818
2819 static void
2820 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2821 struct elf_link_hash_entry *dir,
2822 struct elf_link_hash_entry *ind)
2823 {
2824 struct elf32_arm_link_hash_entry *edir, *eind;
2825
2826 edir = (struct elf32_arm_link_hash_entry *) dir;
2827 eind = (struct elf32_arm_link_hash_entry *) ind;
2828
2829 if (eind->relocs_copied != NULL)
2830 {
2831 if (edir->relocs_copied != NULL)
2832 {
2833 struct elf32_arm_relocs_copied **pp;
2834 struct elf32_arm_relocs_copied *p;
2835
2836 /* Add reloc counts against the indirect sym to the direct sym
2837 list. Merge any entries against the same section. */
2838 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2839 {
2840 struct elf32_arm_relocs_copied *q;
2841
2842 for (q = edir->relocs_copied; q != NULL; q = q->next)
2843 if (q->section == p->section)
2844 {
2845 q->pc_count += p->pc_count;
2846 q->count += p->count;
2847 *pp = p->next;
2848 break;
2849 }
2850 if (q == NULL)
2851 pp = &p->next;
2852 }
2853 *pp = edir->relocs_copied;
2854 }
2855
2856 edir->relocs_copied = eind->relocs_copied;
2857 eind->relocs_copied = NULL;
2858 }
2859
2860 if (ind->root.type == bfd_link_hash_indirect)
2861 {
2862 /* Copy over PLT info. */
2863 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2864 eind->plt_thumb_refcount = 0;
2865 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2866 eind->plt_maybe_thumb_refcount = 0;
2867
2868 if (dir->got.refcount <= 0)
2869 {
2870 edir->tls_type = eind->tls_type;
2871 eind->tls_type = GOT_UNKNOWN;
2872 }
2873 }
2874
2875 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2876 }
2877
2878 /* Create an ARM elf linker hash table. */
2879
2880 static struct bfd_link_hash_table *
2881 elf32_arm_link_hash_table_create (bfd *abfd)
2882 {
2883 struct elf32_arm_link_hash_table *ret;
2884 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2885
2886 ret = bfd_malloc (amt);
2887 if (ret == NULL)
2888 return NULL;
2889
2890 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2891 elf32_arm_link_hash_newfunc,
2892 sizeof (struct elf32_arm_link_hash_entry)))
2893 {
2894 free (ret);
2895 return NULL;
2896 }
2897
2898 ret->sgot = NULL;
2899 ret->sgotplt = NULL;
2900 ret->srelgot = NULL;
2901 ret->splt = NULL;
2902 ret->srelplt = NULL;
2903 ret->sdynbss = NULL;
2904 ret->srelbss = NULL;
2905 ret->srelplt2 = NULL;
2906 ret->thumb_glue_size = 0;
2907 ret->arm_glue_size = 0;
2908 ret->bx_glue_size = 0;
2909 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2910 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2911 ret->vfp11_erratum_glue_size = 0;
2912 ret->num_vfp11_fixes = 0;
2913 ret->fix_cortex_a8 = 0;
2914 ret->bfd_of_glue_owner = NULL;
2915 ret->byteswap_code = 0;
2916 ret->target1_is_rel = 0;
2917 ret->target2_reloc = R_ARM_NONE;
2918 #ifdef FOUR_WORD_PLT
2919 ret->plt_header_size = 16;
2920 ret->plt_entry_size = 16;
2921 #else
2922 ret->plt_header_size = 20;
2923 ret->plt_entry_size = 12;
2924 #endif
2925 ret->fix_v4bx = 0;
2926 ret->use_blx = 0;
2927 ret->vxworks_p = 0;
2928 ret->symbian_p = 0;
2929 ret->use_rel = 1;
2930 ret->sym_sec.abfd = NULL;
2931 ret->obfd = abfd;
2932 ret->tls_ldm_got.refcount = 0;
2933 ret->stub_bfd = NULL;
2934 ret->add_stub_section = NULL;
2935 ret->layout_sections_again = NULL;
2936 ret->stub_group = NULL;
2937 ret->bfd_count = 0;
2938 ret->top_index = 0;
2939 ret->input_list = NULL;
2940
2941 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2942 sizeof (struct elf32_arm_stub_hash_entry)))
2943 {
2944 free (ret);
2945 return NULL;
2946 }
2947
2948 return &ret->root.root;
2949 }
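
/* The PLT sizes defaulted above match the templates earlier in the
   file: without FOUR_WORD_PLT, elf32_arm_plt0_entry is five words
   (20 bytes) and elf32_arm_plt_entry three words (12 bytes); with
   FOUR_WORD_PLT both are four words (16 bytes).  */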
2950
2951 /* Free the derived linker hash table. */
2952
2953 static void
2954 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2955 {
2956 struct elf32_arm_link_hash_table *ret
2957 = (struct elf32_arm_link_hash_table *) hash;
2958
2959 bfd_hash_table_free (&ret->stub_hash_table);
2960 _bfd_generic_link_hash_table_free (hash);
2961 }
2962
2963 /* Determine if we're dealing with a Thumb only architecture. */
2964
2965 static bfd_boolean
2966 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2967 {
2968 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2969 Tag_CPU_arch);
2970 int profile;
2971
2972 if (arch != TAG_CPU_ARCH_V7)
2973 return FALSE;
2974
2975 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2976 Tag_CPU_arch_profile);
2977
2978 return profile == 'M';
2979 }
2980
2981 /* Determine if we're dealing with a Thumb-2 object. */
2982
2983 static bfd_boolean
2984 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2985 {
2986 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2987 Tag_CPU_arch);
2988 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
2989 }
2990
2991 static bfd_boolean
2992 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
2993 {
2994 switch (stub_type)
2995 {
2996 case arm_stub_long_branch_thumb_only:
2997 case arm_stub_long_branch_v4t_thumb_arm:
2998 case arm_stub_short_branch_v4t_thumb_arm:
2999 case arm_stub_long_branch_v4t_thumb_arm_pic:
3000 case arm_stub_long_branch_thumb_only_pic:
3001 return TRUE;
3002 case arm_stub_none:
3003 BFD_FAIL ();
3004 return FALSE;
3005 break;
3006 default:
3007 return FALSE;
3008 }
3009 }
3010
3011 /* Determine the type of stub needed, if any, for a call. */
3012
3013 static enum elf32_arm_stub_type
3014 arm_type_of_stub (struct bfd_link_info *info,
3015 asection *input_sec,
3016 const Elf_Internal_Rela *rel,
3017 unsigned char st_type,
3018 struct elf32_arm_link_hash_entry *hash,
3019 bfd_vma destination,
3020 asection *sym_sec,
3021 bfd *input_bfd,
3022 const char *name)
3023 {
3024 bfd_vma location;
3025 bfd_signed_vma branch_offset;
3026 unsigned int r_type;
3027 struct elf32_arm_link_hash_table * globals;
3028 int thumb2;
3029 int thumb_only;
3030 enum elf32_arm_stub_type stub_type = arm_stub_none;
3031 int use_plt = 0;
3032
3033   /* We don't know the actual type of the destination when it is of
3034      type STT_SECTION: give up.  */
3035 if (st_type == STT_SECTION)
3036 return stub_type;
3037
3038 globals = elf32_arm_hash_table (info);
3039
3040 thumb_only = using_thumb_only (globals);
3041
3042 thumb2 = using_thumb2 (globals);
3043
3044 /* Determine where the call point is. */
3045 location = (input_sec->output_offset
3046 + input_sec->output_section->vma
3047 + rel->r_offset);
3048
3049 branch_offset = (bfd_signed_vma)(destination - location);
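
  /* Illustrative example (hypothetical addresses): a Thumb BL at
     location 0x00008000 targeting 0x00500000 gives a branch_offset of
     0x004f8000, which is beyond THM_MAX_FWD_BRANCH_OFFSET but within
     THM2_MAX_FWD_BRANCH_OFFSET, so for such a call the checks below
     request a stub only when Thumb-2 branches are not available.  */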
3050
3051 r_type = ELF32_R_TYPE (rel->r_info);
3052
3053 /* Keep a simpler condition, for the sake of clarity. */
3054 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3055 {
3056 use_plt = 1;
3057 /* Note when dealing with PLT entries: the main PLT stub is in
3058 ARM mode, so if the branch is in Thumb mode, another
3059 Thumb->ARM stub will be inserted later just before the ARM
3060 PLT stub. We don't take this extra distance into account
3061 here, because if a long branch stub is needed, we'll add a
3062 Thumb->Arm one and branch directly to the ARM PLT entry
3063 because it avoids spreading offset corrections in several
3064 places. */
3065 }
3066
3067 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3068 {
3069 /* Handle cases where:
3070 - this call goes too far (different Thumb/Thumb2 max
3071 distance)
3072 - it's a Thumb->Arm call and blx is not available, or it's a
3073 Thumb->Arm branch (not bl). A stub is needed in this case,
3074 but only if this call is not through a PLT entry. Indeed,
3075 PLT stubs handle mode switching already.
3076 */
3077 if ((!thumb2
3078 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3079 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3080 || (thumb2
3081 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3082 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3083 || ((st_type != STT_ARM_TFUNC)
3084 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3085 || (r_type == R_ARM_THM_JUMP24))
3086 && !use_plt))
3087 {
3088 if (st_type == STT_ARM_TFUNC)
3089 {
3090 /* Thumb to thumb. */
3091 if (!thumb_only)
3092 {
3093 stub_type = (info->shared | globals->pic_veneer)
3094 /* PIC stubs. */
3095 ? ((globals->use_blx
3096                      && (r_type == R_ARM_THM_CALL))
3097 /* V5T and above. Stub starts with ARM code, so
3098 we must be able to switch mode before
3099 reaching it, which is only possible for 'bl'
3100 (ie R_ARM_THM_CALL relocation). */
3101 ? arm_stub_long_branch_any_thumb_pic
3102 /* On V4T, use Thumb code only. */
3103 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3104
3105 /* non-PIC stubs. */
3106 : ((globals->use_blx
3107                      && (r_type == R_ARM_THM_CALL))
3108 /* V5T and above. */
3109 ? arm_stub_long_branch_any_any
3110 /* V4T. */
3111 : arm_stub_long_branch_v4t_thumb_thumb);
3112 }
3113 else
3114 {
3115 stub_type = (info->shared | globals->pic_veneer)
3116 /* PIC stub. */
3117 ? arm_stub_long_branch_thumb_only_pic
3118 /* non-PIC stub. */
3119 : arm_stub_long_branch_thumb_only;
3120 }
3121 }
3122 else
3123 {
3124 /* Thumb to arm. */
3125 if (sym_sec != NULL
3126 && sym_sec->owner != NULL
3127 && !INTERWORK_FLAG (sym_sec->owner))
3128 {
3129 (*_bfd_error_handler)
3130 (_("%B(%s): warning: interworking not enabled.\n"
3131 " first occurrence: %B: Thumb call to ARM"),
3132 sym_sec->owner, input_bfd, name);
3133 }
3134
3135 stub_type = (info->shared | globals->pic_veneer)
3136 /* PIC stubs. */
3137 ? ((globals->use_blx
3138                  && (r_type == R_ARM_THM_CALL))
3139 /* V5T and above. */
3140 ? arm_stub_long_branch_any_arm_pic
3141 /* V4T PIC stub. */
3142 : arm_stub_long_branch_v4t_thumb_arm_pic)
3143
3144 /* non-PIC stubs. */
3145 : ((globals->use_blx
3146                  && (r_type == R_ARM_THM_CALL))
3147 /* V5T and above. */
3148 ? arm_stub_long_branch_any_any
3149 /* V4T. */
3150 : arm_stub_long_branch_v4t_thumb_arm);
3151
3152 /* Handle v4t short branches. */
3153 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3154 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3155 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3156 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3157 }
3158 }
3159 }
3160 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3161 {
3162 if (st_type == STT_ARM_TFUNC)
3163 {
3164 /* Arm to thumb. */
3165
3166 if (sym_sec != NULL
3167 && sym_sec->owner != NULL
3168 && !INTERWORK_FLAG (sym_sec->owner))
3169 {
3170 (*_bfd_error_handler)
3171 (_("%B(%s): warning: interworking not enabled.\n"
3172 " first occurrence: %B: ARM call to Thumb"),
3173 sym_sec->owner, input_bfd, name);
3174 }
3175
3176          /* We have an extra 2 bytes of reach because of
3177             the mode change (bit 24 (H) of the BLX encoding).  */
3178 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3179 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3180 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3181 || (r_type == R_ARM_JUMP24)
3182 || (r_type == R_ARM_PLT32))
3183 {
3184 stub_type = (info->shared | globals->pic_veneer)
3185 /* PIC stubs. */
3186 ? ((globals->use_blx)
3187 /* V5T and above. */
3188 ? arm_stub_long_branch_any_thumb_pic
3189 /* V4T stub. */
3190 : arm_stub_long_branch_v4t_arm_thumb_pic)
3191
3192 /* non-PIC stubs. */
3193 : ((globals->use_blx)
3194 /* V5T and above. */
3195 ? arm_stub_long_branch_any_any
3196 /* V4T. */
3197 : arm_stub_long_branch_v4t_arm_thumb);
3198 }
3199 }
3200 else
3201 {
3202 /* Arm to arm. */
3203 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3204 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3205 {
3206 stub_type = (info->shared | globals->pic_veneer)
3207 /* PIC stubs. */
3208 ? arm_stub_long_branch_any_arm_pic
3209 /* non-PIC stubs. */
3210 : arm_stub_long_branch_any_any;
3211 }
3212 }
3213 }
3214
3215 return stub_type;
3216 }
3217
3218 /* Build a name for an entry in the stub hash table. */
3219
3220 static char *
3221 elf32_arm_stub_name (const asection *input_section,
3222 const asection *sym_sec,
3223 const struct elf32_arm_link_hash_entry *hash,
3224 const Elf_Internal_Rela *rel)
3225 {
3226 char *stub_name;
3227 bfd_size_type len;
3228
3229 if (hash)
3230 {
3231 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3232 stub_name = bfd_malloc (len);
3233 if (stub_name != NULL)
3234 sprintf (stub_name, "%08x_%s+%x",
3235 input_section->id & 0xffffffff,
3236 hash->root.root.root.string,
3237 (int) rel->r_addend & 0xffffffff);
3238 }
3239 else
3240 {
3241 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3242 stub_name = bfd_malloc (len);
3243 if (stub_name != NULL)
3244 sprintf (stub_name, "%08x_%x:%x+%x",
3245 input_section->id & 0xffffffff,
3246 sym_sec->id & 0xffffffff,
3247 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3248 (int) rel->r_addend & 0xffffffff);
3249 }
3250
3251 return stub_name;
3252 }
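
/* As an illustration (made-up ids), a stub for a call to printf from a
   group whose id section has id 0x2a and with a zero addend would be
   named "0000002a_printf+0", while the local-symbol form produces names
   such as "0000002a_1f:5+0".  */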
3253
3254 /* Look up an entry in the stub hash. Stub entries are cached because
3255 creating the stub name takes a bit of time. */
3256
3257 static struct elf32_arm_stub_hash_entry *
3258 elf32_arm_get_stub_entry (const asection *input_section,
3259 const asection *sym_sec,
3260 struct elf_link_hash_entry *hash,
3261 const Elf_Internal_Rela *rel,
3262 struct elf32_arm_link_hash_table *htab)
3263 {
3264 struct elf32_arm_stub_hash_entry *stub_entry;
3265 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3266 const asection *id_sec;
3267
3268 if ((input_section->flags & SEC_CODE) == 0)
3269 return NULL;
3270
3271 /* If this input section is part of a group of sections sharing one
3272 stub section, then use the id of the first section in the group.
3273 Stub names need to include a section id, as there may well be
3274      more than one stub used to reach, say, printf, and we need to
3275 distinguish between them. */
3276 id_sec = htab->stub_group[input_section->id].link_sec;
3277
3278 if (h != NULL && h->stub_cache != NULL
3279 && h->stub_cache->h == h
3280 && h->stub_cache->id_sec == id_sec)
3281 {
3282 stub_entry = h->stub_cache;
3283 }
3284 else
3285 {
3286 char *stub_name;
3287
3288 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3289 if (stub_name == NULL)
3290 return NULL;
3291
3292 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3293 stub_name, FALSE, FALSE);
3294 if (h != NULL)
3295 h->stub_cache = stub_entry;
3296
3297 free (stub_name);
3298 }
3299
3300 return stub_entry;
3301 }
3302
3303 /* Find or create a stub section. Returns a pointer to the stub section, and
3304 the section to which the stub section will be attached (in *LINK_SEC_P).
3305 LINK_SEC_P may be NULL. */
3306
3307 static asection *
3308 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3309 struct elf32_arm_link_hash_table *htab)
3310 {
3311 asection *link_sec;
3312 asection *stub_sec;
3313
3314 link_sec = htab->stub_group[section->id].link_sec;
3315 stub_sec = htab->stub_group[section->id].stub_sec;
3316 if (stub_sec == NULL)
3317 {
3318 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3319 if (stub_sec == NULL)
3320 {
3321 size_t namelen;
3322 bfd_size_type len;
3323 char *s_name;
3324
3325 namelen = strlen (link_sec->name);
3326 len = namelen + sizeof (STUB_SUFFIX);
3327 s_name = bfd_alloc (htab->stub_bfd, len);
3328 if (s_name == NULL)
3329 return NULL;
3330
3331 memcpy (s_name, link_sec->name, namelen);
3332 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3333 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3334 if (stub_sec == NULL)
3335 return NULL;
3336 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3337 }
3338 htab->stub_group[section->id].stub_sec = stub_sec;
3339 }
3340
3341 if (link_sec_p)
3342 *link_sec_p = link_sec;
3343
3344 return stub_sec;
3345 }
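
/* For example, stubs grouped under an input section named .text are
   placed in a section named .text.stub (the link section's name with
   STUB_SUFFIX appended).  */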
3346
3347 /* Add a new stub entry to the stub hash. Not all fields of the new
3348 stub entry are initialised. */
3349
3350 static struct elf32_arm_stub_hash_entry *
3351 elf32_arm_add_stub (const char *stub_name,
3352 asection *section,
3353 struct elf32_arm_link_hash_table *htab)
3354 {
3355 asection *link_sec;
3356 asection *stub_sec;
3357 struct elf32_arm_stub_hash_entry *stub_entry;
3358
3359 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3360 if (stub_sec == NULL)
3361 return NULL;
3362
3363 /* Enter this entry into the linker stub hash table. */
3364 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3365 TRUE, FALSE);
3366 if (stub_entry == NULL)
3367 {
3368 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3369 section->owner,
3370 stub_name);
3371 return NULL;
3372 }
3373
3374 stub_entry->stub_sec = stub_sec;
3375 stub_entry->stub_offset = 0;
3376 stub_entry->id_sec = link_sec;
3377
3378 return stub_entry;
3379 }
3380
3381 /* Store an Arm insn into an output section not processed by
3382 elf32_arm_write_section. */
3383
3384 static void
3385 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3386 bfd * output_bfd, bfd_vma val, void * ptr)
3387 {
3388 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3389 bfd_putl32 (val, ptr);
3390 else
3391 bfd_putb32 (val, ptr);
3392 }
3393
3394 /* Store a 16-bit Thumb insn into an output section not processed by
3395 elf32_arm_write_section. */
3396
3397 static void
3398 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3399 bfd * output_bfd, bfd_vma val, void * ptr)
3400 {
3401 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3402 bfd_putl16 (val, ptr);
3403 else
3404 bfd_putb16 (val, ptr);
3405 }
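
/* Summarising the byte-order handling above: instructions are written
   little-endian exactly when byteswap_code differs from the output's
   little-endianness, so a plain little-endian link and a BE8 link
   (byteswap_code set on a big-endian output) both take the bfd_putl
   path, while a traditional big-endian (BE32) link takes bfd_putb.  */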
3406
3407 static bfd_reloc_status_type elf32_arm_final_link_relocate
3408 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3409 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3410 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3411
3412 static bfd_boolean
3413 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3414 void * in_arg)
3415 {
3416 #define MAXRELOCS 2
3417 struct elf32_arm_stub_hash_entry *stub_entry;
3418 struct bfd_link_info *info;
3419 struct elf32_arm_link_hash_table *htab;
3420 asection *stub_sec;
3421 bfd *stub_bfd;
3422 bfd_vma stub_addr;
3423 bfd_byte *loc;
3424 bfd_vma sym_value;
3425 int template_size;
3426 int size;
3427 const insn_sequence *template;
3428 int i;
3429 struct elf32_arm_link_hash_table * globals;
3430 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3431 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3432 int nrelocs = 0;
3433
3434 /* Massage our args to the form they really have. */
3435 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3436 info = (struct bfd_link_info *) in_arg;
3437
3438 globals = elf32_arm_hash_table (info);
3439
3440 htab = elf32_arm_hash_table (info);
3441 stub_sec = stub_entry->stub_sec;
3442
3443 /* Make a note of the offset within the stubs for this entry. */
3444 stub_entry->stub_offset = stub_sec->size;
3445 loc = stub_sec->contents + stub_entry->stub_offset;
3446
3447 stub_bfd = stub_sec->owner;
3448
3449 /* This is the address of the start of the stub. */
3450 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3451 + stub_entry->stub_offset;
3452
3453 /* This is the address of the stub destination. */
3454 sym_value = (stub_entry->target_value
3455 + stub_entry->target_section->output_offset
3456 + stub_entry->target_section->output_section->vma);
3457
3458 template = stub_entry->stub_template;
3459 template_size = stub_entry->stub_template_size;
3460
3461 size = 0;
3462 for (i = 0; i < template_size; i++)
3463 {
3464 switch (template[i].type)
3465 {
3466 case THUMB16_TYPE:
3467 {
3468 bfd_vma data = template[i].data;
3469 if (template[i].reloc_addend != 0)
3470 {
3471 /* We've borrowed the reloc_addend field to mean we should
3472 insert a condition code into this (Thumb-1 branch)
3473 instruction. See THUMB16_BCOND_INSN. */
3474 BFD_ASSERT ((data & 0xff00) == 0xd000);
3475 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3476 }
3477 put_thumb_insn (globals, stub_bfd, data, loc + size);
3478 size += 2;
3479 }
3480 break;
3481
3482 case THUMB32_TYPE:
3483 put_thumb_insn (globals, stub_bfd, (template[i].data >> 16) & 0xffff,
3484 loc + size);
3485 put_thumb_insn (globals, stub_bfd, template[i].data & 0xffff,
3486 loc + size + 2);
3487 if (template[i].r_type != R_ARM_NONE)
3488 {
3489 stub_reloc_idx[nrelocs] = i;
3490 stub_reloc_offset[nrelocs++] = size;
3491 }
3492 size += 4;
3493 break;
3494
3495 case ARM_TYPE:
3496 put_arm_insn (globals, stub_bfd, template[i].data, loc + size);
3497 /* Handle cases where the target is encoded within the
3498 instruction. */
3499 if (template[i].r_type == R_ARM_JUMP24)
3500 {
3501 stub_reloc_idx[nrelocs] = i;
3502 stub_reloc_offset[nrelocs++] = size;
3503 }
3504 size += 4;
3505 break;
3506
3507 case DATA_TYPE:
3508 bfd_put_32 (stub_bfd, template[i].data, loc + size);
3509 stub_reloc_idx[nrelocs] = i;
3510 stub_reloc_offset[nrelocs++] = size;
3511 size += 4;
3512 break;
3513
3514 default:
3515 BFD_FAIL ();
3516 return FALSE;
3517 }
3518 }
3519
3520 stub_sec->size += size;
3521
3522 /* Stub size has already been computed in arm_size_one_stub. Check
3523 consistency. */
3524 BFD_ASSERT (size == stub_entry->stub_size);
3525
3526 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3527 if (stub_entry->st_type == STT_ARM_TFUNC)
3528 sym_value |= 1;
3529
3530   /* Assume that each stub has at least one and at most MAXRELOCS entries
3531      to relocate. */
3532 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3533
3534 for (i = 0; i < nrelocs; i++)
3535 if (template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3536 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3537 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3538 || template[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3539 {
3540 Elf_Internal_Rela rel;
3541 bfd_boolean unresolved_reloc;
3542 char *error_message;
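     	  /* R_ARM_THM_XPC22 is a Thumb BLX, which switches to ARM state,
     	     so its target must not be marked as a Thumb function; the other
     	     Thumb branch relocations handled here keep the target in Thumb
     	     state.  */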
3543 int sym_flags
3544 = (template[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3545 ? STT_ARM_TFUNC : 0;
3546 bfd_vma points_to = sym_value + stub_entry->target_addend;
3547
3548 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3549 rel.r_info = ELF32_R_INFO (0, template[stub_reloc_idx[i]].r_type);
3550 rel.r_addend = template[stub_reloc_idx[i]].reloc_addend;
3551
3552 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3553 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3554 template should refer back to the instruction after the original
3555 branch. */
3556 points_to = sym_value;
3557
3558 	    /* Note: _bfd_final_link_relocate doesn't handle these relocations
3559 	       properly.  We should probably use elf32_arm_final_link_relocate
3560 	       unconditionally, rather than only for the relocations listed in
3561 	       the enclosing conditional, for the sake of consistency. */
3562 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3563 (template[stub_reloc_idx[i]].r_type),
3564 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3565 points_to, info, stub_entry->target_section, "", sym_flags,
3566 (struct elf_link_hash_entry *) stub_entry, &unresolved_reloc,
3567 &error_message);
3568 }
3569 else
3570 {
3571 _bfd_final_link_relocate (elf32_arm_howto_from_type
3572 (template[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3573 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3574 sym_value + stub_entry->target_addend,
3575 template[stub_reloc_idx[i]].reloc_addend);
3576 }
3577
3578 return TRUE;
3579 #undef MAXRELOCS
3580 }
3581
3582 /* Calculate the template, template size and instruction size for a stub.
3583 Return value is the instruction size. */
3584
3585 static unsigned int
3586 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3587 const insn_sequence **stub_template,
3588 int *stub_template_size)
3589 {
3590 const insn_sequence *template = NULL;
3591 int template_size = 0, i;
3592 unsigned int size;
3593
3594 template = stub_definitions[stub_type].template;
3595 template_size = stub_definitions[stub_type].template_size;
3596
3597 size = 0;
3598 for (i = 0; i < template_size; i++)
3599 {
3600 switch (template[i].type)
3601 {
3602 case THUMB16_TYPE:
3603 size += 2;
3604 break;
3605
3606 case ARM_TYPE:
3607 case THUMB32_TYPE:
3608 case DATA_TYPE:
3609 size += 4;
3610 break;
3611
3612 default:
3613 BFD_FAIL ();
3614 	  return 0;
3615 }
3616 }
3617
3618 if (stub_template)
3619 *stub_template = template;
3620
3621 if (stub_template_size)
3622 *stub_template_size = template_size;
3623
3624 return size;
3625 }
3626
3627 /* As above, but don't actually build the stub. Just bump offset so
3628 we know stub section sizes. */
3629
3630 static bfd_boolean
3631 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3632 void * in_arg)
3633 {
3634 struct elf32_arm_stub_hash_entry *stub_entry;
3635 struct elf32_arm_link_hash_table *htab;
3636 const insn_sequence *template;
3637 int template_size, size;
3638
3639 /* Massage our args to the form they really have. */
3640 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3641 htab = (struct elf32_arm_link_hash_table *) in_arg;
3642
3643   BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
3644 	      && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
3645
3646 size = find_stub_size_and_template (stub_entry->stub_type, &template,
3647 &template_size);
3648
3649 stub_entry->stub_size = size;
3650 stub_entry->stub_template = template;
3651 stub_entry->stub_template_size = template_size;
3652
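       /* Pad the size added to the stub section out to a multiple of eight
          bytes.  */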
3653 size = (size + 7) & ~7;
3654 stub_entry->stub_sec->size += size;
3655
3656 return TRUE;
3657 }
3658
3659 /* External entry points for sizing and building linker stubs. */
3660
3661 /* Set up various things so that we can make a list of input sections
3662 for each output section included in the link. Returns -1 on error,
3663 0 when no stubs will be needed, and 1 on success. */
3664
3665 int
3666 elf32_arm_setup_section_lists (bfd *output_bfd,
3667 struct bfd_link_info *info)
3668 {
3669 bfd *input_bfd;
3670 unsigned int bfd_count;
3671 int top_id, top_index;
3672 asection *section;
3673 asection **input_list, **list;
3674 bfd_size_type amt;
3675 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3676
3677 if (! is_elf_hash_table (htab))
3678 return 0;
3679
3680 /* Count the number of input BFDs and find the top input section id. */
3681 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3682 input_bfd != NULL;
3683 input_bfd = input_bfd->link_next)
3684 {
3685 bfd_count += 1;
3686 for (section = input_bfd->sections;
3687 section != NULL;
3688 section = section->next)
3689 {
3690 if (top_id < section->id)
3691 top_id = section->id;
3692 }
3693 }
3694 htab->bfd_count = bfd_count;
3695
3696 amt = sizeof (struct map_stub) * (top_id + 1);
3697 htab->stub_group = bfd_zmalloc (amt);
3698 if (htab->stub_group == NULL)
3699 return -1;
3700
3701 /* We can't use output_bfd->section_count here to find the top output
3702 section index as some sections may have been removed, and
3703 _bfd_strip_section_from_output doesn't renumber the indices. */
3704 for (section = output_bfd->sections, top_index = 0;
3705 section != NULL;
3706 section = section->next)
3707 {
3708 if (top_index < section->index)
3709 top_index = section->index;
3710 }
3711
3712 htab->top_index = top_index;
3713 amt = sizeof (asection *) * (top_index + 1);
3714 input_list = bfd_malloc (amt);
3715 htab->input_list = input_list;
3716 if (input_list == NULL)
3717 return -1;
3718
3719 /* For sections we aren't interested in, mark their entries with a
3720 value we can check later. */
3721 list = input_list + top_index;
3722 do
3723 *list = bfd_abs_section_ptr;
3724 while (list-- != input_list);
3725
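       /* Code sections are the ones we are interested in: reset their
          entries to NULL so that elf32_arm_next_input_section can chain
          their input sections onto them.  */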
3726 for (section = output_bfd->sections;
3727 section != NULL;
3728 section = section->next)
3729 {
3730 if ((section->flags & SEC_CODE) != 0)
3731 input_list[section->index] = NULL;
3732 }
3733
3734 return 1;
3735 }
3736
3737 /* The linker repeatedly calls this function for each input section,
3738 in the order that input sections are linked into output sections.
3739 Build lists of input sections to determine groupings between which
3740 we may insert linker stubs. */
3741
3742 void
3743 elf32_arm_next_input_section (struct bfd_link_info *info,
3744 asection *isec)
3745 {
3746 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3747
3748 if (isec->output_section->index <= htab->top_index)
3749 {
3750 asection **list = htab->input_list + isec->output_section->index;
3751
3752 if (*list != bfd_abs_section_ptr)
3753 {
3754 /* Steal the link_sec pointer for our list. */
3755 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3756 /* This happens to make the list in reverse order,
3757 which we reverse later. */
3758 PREV_SEC (isec) = *list;
3759 *list = isec;
3760 }
3761 }
3762 }
3763
3764 /* See whether we can group stub sections together. Grouping stub
3765 sections may result in fewer stubs. More importantly, we need to
3766 put all .init* and .fini* stubs at the end of the .init or
3767 .fini output sections respectively, because glibc splits the
3768 _init and _fini functions into multiple parts. Putting a stub in
3769 the middle of a function is not a good idea. */
3770
3771 static void
3772 group_sections (struct elf32_arm_link_hash_table *htab,
3773 bfd_size_type stub_group_size,
3774 bfd_boolean stubs_always_after_branch)
3775 {
3776 asection **list = htab->input_list;
3777
3778 do
3779 {
3780 asection *tail = *list;
3781 asection *head;
3782
3783 if (tail == bfd_abs_section_ptr)
3784 continue;
3785
3786 /* Reverse the list: we must avoid placing stubs at the
3787 beginning of the section because the beginning of the text
3788 section may be required for an interrupt vector in bare metal
3789 code. */
3790 #define NEXT_SEC PREV_SEC
3791 head = NULL;
3792 while (tail != NULL)
3793 {
3794 /* Pop from tail. */
3795 asection *item = tail;
3796 tail = PREV_SEC (item);
3797
3798 /* Push on head. */
3799 NEXT_SEC (item) = head;
3800 head = item;
3801 }
3802
3803 while (head != NULL)
3804 {
3805 asection *curr;
3806 asection *next;
3807 bfd_vma stub_group_start = head->output_offset;
3808 bfd_vma end_of_next;
3809
3810 curr = head;
3811 while (NEXT_SEC (curr) != NULL)
3812 {
3813 next = NEXT_SEC (curr);
3814 end_of_next = next->output_offset + next->size;
3815 if (end_of_next - stub_group_start >= stub_group_size)
3816 /* End of NEXT is too far from start, so stop. */
3817 break;
3818 /* Add NEXT to the group. */
3819 curr = next;
3820 }
3821
3822 /* OK, the size from the start to the start of CURR is less
3823 than stub_group_size and thus can be handled by one stub
3824 section. (Or the head section is itself larger than
3825 stub_group_size, in which case we may be toast.)
3826 We should really be keeping track of the total size of
3827 stubs added here, as stubs contribute to the final output
3828 section size. */
3829 do
3830 {
3831 next = NEXT_SEC (head);
3832 /* Set up this stub group. */
3833 htab->stub_group[head->id].link_sec = curr;
3834 }
3835 while (head != curr && (head = next) != NULL);
3836
3837 /* But wait, there's more! Input sections up to stub_group_size
3838 bytes after the stub section can be handled by it too. */
3839 if (!stubs_always_after_branch)
3840 {
3841 stub_group_start = curr->output_offset + curr->size;
3842
3843 while (next != NULL)
3844 {
3845 end_of_next = next->output_offset + next->size;
3846 if (end_of_next - stub_group_start >= stub_group_size)
3847 /* End of NEXT is too far from stubs, so stop. */
3848 break;
3849 /* Add NEXT to the stub group. */
3850 head = next;
3851 next = NEXT_SEC (head);
3852 htab->stub_group[head->id].link_sec = curr;
3853 }
3854 }
3855 head = next;
3856 }
3857 }
3858 while (list++ != htab->input_list + htab->top_index);
3859
3860 free (htab->input_list);
3861 #undef PREV_SEC
3862 #undef NEXT_SEC
3863 }
3864
3865 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3866 erratum fix. */
3867
3868 static int
3869 a8_reloc_compare (const void *a, const void *b)
3870 {
3871 const struct a8_erratum_reloc *ra = a, *rb = b;
3872
3873 if (ra->from < rb->from)
3874 return -1;
3875 else if (ra->from > rb->from)
3876 return 1;
3877 else
3878 return 0;
3879 }
3880
3881 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3882 const char *, char **);
3883
3884 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3885 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3886 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
3887 otherwise. */
3888
3889 static bfd_boolean
3890 cortex_a8_erratum_scan (bfd *input_bfd,
3891 struct bfd_link_info *info,
3892 struct a8_erratum_fix **a8_fixes_p,
3893 unsigned int *num_a8_fixes_p,
3894 unsigned int *a8_fix_table_size_p,
3895 struct a8_erratum_reloc *a8_relocs,
3896 unsigned int num_a8_relocs)
3897 {
3898 asection *section;
3899 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3900 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3901 unsigned int num_a8_fixes = *num_a8_fixes_p;
3902 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3903
3904 for (section = input_bfd->sections;
3905 section != NULL;
3906 section = section->next)
3907 {
3908 bfd_byte *contents = NULL;
3909 struct _arm_elf_section_data *sec_data;
3910 unsigned int span;
3911 bfd_vma base_vma;
3912
3913 if (elf_section_type (section) != SHT_PROGBITS
3914 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3915 || (section->flags & SEC_EXCLUDE) != 0
3916 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3917 || (section->output_section == bfd_abs_section_ptr))
3918 continue;
3919
3920 base_vma = section->output_section->vma + section->output_offset;
3921
3922 if (elf_section_data (section)->this_hdr.contents != NULL)
3923 contents = elf_section_data (section)->this_hdr.contents;
3924 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3925 return TRUE;
3926
3927 sec_data = elf32_arm_section_data (section);
3928
3929 for (span = 0; span < sec_data->mapcount; span++)
3930 {
3931 unsigned int span_start = sec_data->map[span].vma;
3932 unsigned int span_end = (span == sec_data->mapcount - 1)
3933 ? section->size : sec_data->map[span + 1].vma;
3934 unsigned int i;
3935 char span_type = sec_data->map[span].type;
3936 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3937
3938 if (span_type != 't')
3939 continue;
3940
3941 /* Span is entirely within a single 4KB region: skip scanning. */
3942 if (((base_vma + span_start) & ~0xfff)
3943 == ((base_vma + span_end) & ~0xfff))
3944 continue;
3945
3946 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
3947
3948 * The opcode is BLX.W, BL.W, B.W, Bcc.W
3949 * The branch target is in the same 4KB region as the
3950 first half of the branch.
3951 	     * The instruction before the branch is a 32-bit
3952 	       non-branch instruction. */
3953 for (i = span_start; i < span_end;)
3954 {
3955 unsigned int insn = bfd_getl16 (&contents[i]);
3956 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
3957 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
3958
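     	      /* A halfword with bits [15:13] == 0b111 and bits [12:11] != 0b00
     		 is the first half of a 32-bit Thumb-2 instruction (0b11100 is
     		 the 16-bit unconditional branch).  */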
3959 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
3960 insn_32bit = TRUE;
3961
3962 if (insn_32bit)
3963 {
3964 /* Load the rest of the insn (in manual-friendly order). */
3965 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
3966
3967 /* Encoding T4: B<c>.W. */
3968 is_b = (insn & 0xf800d000) == 0xf0009000;
3969 /* Encoding T1: BL<c>.W. */
3970 is_bl = (insn & 0xf800d000) == 0xf000d000;
3971 /* Encoding T2: BLX<c>.W. */
3972 is_blx = (insn & 0xf800d000) == 0xf000c000;
3973 /* Encoding T3: B<c>.W (not permitted in IT block). */
3974 is_bcc = (insn & 0xf800d000) == 0xf0008000
3975 && (insn & 0x07f00000) != 0x03800000;
3976 }
3977
3978 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
3979
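     	      /* The erratum can only trigger when the first halfword of the
     		 32-bit branch occupies the last two bytes of a 4KB page, so
     		 that the branch straddles a page boundary.  */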
3980 if (((base_vma + i) & 0xfff) == 0xffe
3981 && insn_32bit
3982 && is_32bit_branch
3983 && last_was_32bit
3984 && ! last_was_branch)
3985 {
3986 bfd_signed_vma offset;
3987 bfd_boolean force_target_arm = FALSE;
3988 bfd_boolean force_target_thumb = FALSE;
3989 bfd_vma target;
3990 enum elf32_arm_stub_type stub_type = arm_stub_none;
3991 struct a8_erratum_reloc key, *found;
3992
3993 key.from = base_vma + i;
3994 found = bsearch (&key, a8_relocs, num_a8_relocs,
3995 sizeof (struct a8_erratum_reloc),
3996 &a8_reloc_compare);
3997
3998 if (found)
3999 {
4000 char *error_message = NULL;
4001 struct elf_link_hash_entry *entry;
4002
4003 		  /* We don't care about the error returned from this
4004 		     function, only whether there is glue or not. */
4005 entry = find_thumb_glue (info, found->sym_name,
4006 &error_message);
4007
4008 if (entry)
4009 found->non_a8_stub = TRUE;
4010
4011 if (found->r_type == R_ARM_THM_CALL
4012 && found->st_type != STT_ARM_TFUNC)
4013 force_target_arm = TRUE;
4014 else if (found->r_type == R_ARM_THM_CALL
4015 && found->st_type == STT_ARM_TFUNC)
4016 force_target_thumb = TRUE;
4017 }
4018
4019 /* Check if we have an offending branch instruction. */
4020
4021 if (found && found->non_a8_stub)
4022 /* We've already made a stub for this instruction, e.g.
4023 it's a long branch or a Thumb->ARM stub. Assume that
4024 stub will suffice to work around the A8 erratum (see
4025 		   setting of stubs_always_after_branch above).  */
4026 ;
4027 else if (is_bcc)
4028 {
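     		  /* Reassemble the conditional branch offset from encoding
     		     T3: S:J2:J1:imm6:imm11:'0', then sign-extend from
     		     bit 20.  */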
4029 offset = (insn & 0x7ff) << 1;
4030 offset |= (insn & 0x3f0000) >> 4;
4031 offset |= (insn & 0x2000) ? 0x40000 : 0;
4032 offset |= (insn & 0x800) ? 0x80000 : 0;
4033 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4034 if (offset & 0x100000)
4035 offset |= ~ ((bfd_signed_vma) 0xfffff);
4036 stub_type = arm_stub_a8_veneer_b_cond;
4037 }
4038 else if (is_b || is_bl || is_blx)
4039 {
4040 int s = (insn & 0x4000000) != 0;
4041 int j1 = (insn & 0x2000) != 0;
4042 int j2 = (insn & 0x800) != 0;
4043 int i1 = !(j1 ^ s);
4044 int i2 = !(j2 ^ s);
4045
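     		  /* Reassemble the offset from encodings T1/T2/T4:
     		     S:I1:I2:imm10:imm11:'0', where I1 = NOT(J1 EOR S) and
     		     I2 = NOT(J2 EOR S), then sign-extend from bit 24.  */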
4046 offset = (insn & 0x7ff) << 1;
4047 offset |= (insn & 0x3ff0000) >> 4;
4048 offset |= i2 << 22;
4049 offset |= i1 << 23;
4050 offset |= s << 24;
4051 if (offset & 0x1000000)
4052 offset |= ~ ((bfd_signed_vma) 0xffffff);
4053
4054 if (is_blx)
4055 offset &= ~ ((bfd_signed_vma) 3);
4056
4057 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4058 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4059 }
4060
4061 if (stub_type != arm_stub_none)
4062 {
4063 bfd_vma pc_for_insn = base_vma + i + 4;
4064
4065 /* The original instruction is a BL, but the target is
4066 an ARM instruction. If we were not making a stub,
4067 the BL would have been converted to a BLX. Use the
4068 BLX stub instead in that case. */
4069 if (htab->use_blx && force_target_arm
4070 && stub_type == arm_stub_a8_veneer_bl)
4071 {
4072 stub_type = arm_stub_a8_veneer_blx;
4073 is_blx = TRUE;
4074 is_bl = FALSE;
4075 }
4076 /* Conversely, if the original instruction was
4077 BLX but the target is Thumb mode, use the BL
4078 stub. */
4079 else if (force_target_thumb
4080 && stub_type == arm_stub_a8_veneer_blx)
4081 {
4082 stub_type = arm_stub_a8_veneer_bl;
4083 is_blx = FALSE;
4084 is_bl = TRUE;
4085 }
4086
4087 if (is_blx)
4088 pc_for_insn &= ~ ((bfd_vma) 3);
4089
4090 /* If we found a relocation, use the proper destination,
4091 not the offset in the (unrelocated) instruction.
4092 Note this is always done if we switched the stub type
4093 above. */
4094 if (found)
4095 offset =
4096 (bfd_signed_vma) (found->destination - pc_for_insn);
4097
4098 target = pc_for_insn + offset;
4099
4100 /* The BLX stub is ARM-mode code. Adjust the offset to
4101 take the different PC value (+8 instead of +4) into
4102 account. */
4103 if (stub_type == arm_stub_a8_veneer_blx)
4104 offset += 4;
4105
4106 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4107 {
4108 char *stub_name;
4109
4110 if (num_a8_fixes == a8_fix_table_size)
4111 {
4112 a8_fix_table_size *= 2;
4113 a8_fixes = bfd_realloc (a8_fixes,
4114 sizeof (struct a8_erratum_fix)
4115 * a8_fix_table_size);
4116 }
4117
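     			/* Name the stub "<section id>:<offset>", both in hex:
     			   at most 8 + 1 + 8 characters plus a trailing NUL.  */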
4118 stub_name = bfd_malloc (8 + 1 + 8 + 1);
4119 if (stub_name != NULL)
4120 sprintf (stub_name, "%x:%x", section->id, i);
4121
4122 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4123 a8_fixes[num_a8_fixes].section = section;
4124 a8_fixes[num_a8_fixes].offset = i;
4125 a8_fixes[num_a8_fixes].addend = offset;
4126 a8_fixes[num_a8_fixes].orig_insn = insn;
4127 a8_fixes[num_a8_fixes].stub_name = stub_name;
4128 a8_fixes[num_a8_fixes].stub_type = stub_type;
4129
4130 num_a8_fixes++;
4131 }
4132 }
4133 }
4134
4135 i += insn_32bit ? 4 : 2;
4136 last_was_32bit = insn_32bit;
4137 last_was_branch = is_32bit_branch;
4138 }
4139 }
4140
4141 if (elf_section_data (section)->this_hdr.contents == NULL)
4142 free (contents);
4143 }
4144
4145 *a8_fixes_p = a8_fixes;
4146 *num_a8_fixes_p = num_a8_fixes;
4147 *a8_fix_table_size_p = a8_fix_table_size;
4148
4149 return FALSE;
4150 }
4151
4152 /* Determine and set the size of the stub section for a final link.
4153
4154 The basic idea here is to examine all the relocations looking for
4155 PC-relative calls to a target that is unreachable with a "bl"
4156 instruction. */
4157
4158 bfd_boolean
4159 elf32_arm_size_stubs (bfd *output_bfd,
4160 bfd *stub_bfd,
4161 struct bfd_link_info *info,
4162 bfd_signed_vma group_size,
4163 asection * (*add_stub_section) (const char *, asection *),
4164 void (*layout_sections_again) (void))
4165 {
4166 bfd_size_type stub_group_size;
4167 bfd_boolean stubs_always_after_branch;
4168   bfd_boolean stub_changed = FALSE;
4169 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4170 struct a8_erratum_fix *a8_fixes = NULL;
4171 unsigned int num_a8_fixes = 0, prev_num_a8_fixes = 0, a8_fix_table_size = 10;
4172 struct a8_erratum_reloc *a8_relocs = NULL;
4173 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4174
4175 if (htab->fix_cortex_a8)
4176 {
4177 a8_fixes = bfd_zmalloc (sizeof (struct a8_erratum_fix)
4178 * a8_fix_table_size);
4179 a8_relocs = bfd_zmalloc (sizeof (struct a8_erratum_reloc)
4180 * a8_reloc_table_size);
4181 }
4182
4183 /* Propagate mach to stub bfd, because it may not have been
4184 finalized when we created stub_bfd. */
4185 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4186 bfd_get_mach (output_bfd));
4187
4188 /* Stash our params away. */
4189 htab->stub_bfd = stub_bfd;
4190 htab->add_stub_section = add_stub_section;
4191 htab->layout_sections_again = layout_sections_again;
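       /* A negative GROUP_SIZE selects stubs-always-after-branch placement;
          its magnitude is still used as the group size below.  */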
4192 stubs_always_after_branch = group_size < 0;
4193
4194 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4195 as the first half of a 32-bit branch straddling two 4K pages. This is a
4196 crude way of enforcing that. */
4197 if (htab->fix_cortex_a8)
4198 stubs_always_after_branch = 1;
4199
4200 if (group_size < 0)
4201 stub_group_size = -group_size;
4202 else
4203 stub_group_size = group_size;
4204
4205 if (stub_group_size == 1)
4206 {
4207 /* Default values. */
4208       /* The Thumb branch range of +-4MB has to be used as the default
4209 	 maximum group size (a given section can contain both ARM and
4210 	 Thumb code, so the worst case has to be taken into account).
4211
4212 This value is 24K less than that, which allows for 2025
4213 12-byte stubs. If we exceed that, then we will fail to link.
4214 The user will have to relink with an explicit group size
4215 option. */
4216 stub_group_size = 4170000;
4217 }
4218
4219 group_sections (htab, stub_group_size, stubs_always_after_branch);
4220
4221 while (1)
4222 {
4223 bfd *input_bfd;
4224 unsigned int bfd_indx;
4225 asection *stub_sec;
4226
4227 num_a8_fixes = 0;
4228
4229 for (input_bfd = info->input_bfds, bfd_indx = 0;
4230 input_bfd != NULL;
4231 input_bfd = input_bfd->link_next, bfd_indx++)
4232 {
4233 Elf_Internal_Shdr *symtab_hdr;
4234 asection *section;
4235 Elf_Internal_Sym *local_syms = NULL;
4236
4237 num_a8_relocs = 0;
4238
4239 /* We'll need the symbol table in a second. */
4240 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4241 if (symtab_hdr->sh_info == 0)
4242 continue;
4243
4244 /* Walk over each section attached to the input bfd. */
4245 for (section = input_bfd->sections;
4246 section != NULL;
4247 section = section->next)
4248 {
4249 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4250
4251 /* If there aren't any relocs, then there's nothing more
4252 to do. */
4253 if ((section->flags & SEC_RELOC) == 0
4254 || section->reloc_count == 0
4255 || (section->flags & SEC_CODE) == 0)
4256 continue;
4257
4258 /* If this section is a link-once section that will be
4259 discarded, then don't create any stubs. */
4260 if (section->output_section == NULL
4261 || section->output_section->owner != output_bfd)
4262 continue;
4263
4264 /* Get the relocs. */
4265 internal_relocs
4266 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4267 NULL, info->keep_memory);
4268 if (internal_relocs == NULL)
4269 goto error_ret_free_local;
4270
4271 /* Now examine each relocation. */
4272 irela = internal_relocs;
4273 irelaend = irela + section->reloc_count;
4274 for (; irela < irelaend; irela++)
4275 {
4276 unsigned int r_type, r_indx;
4277 enum elf32_arm_stub_type stub_type;
4278 struct elf32_arm_stub_hash_entry *stub_entry;
4279 asection *sym_sec;
4280 bfd_vma sym_value;
4281 bfd_vma destination;
4282 struct elf32_arm_link_hash_entry *hash;
4283 const char *sym_name;
4284 char *stub_name;
4285 const asection *id_sec;
4286 unsigned char st_type;
4287 bfd_boolean created_stub = FALSE;
4288
4289 r_type = ELF32_R_TYPE (irela->r_info);
4290 r_indx = ELF32_R_SYM (irela->r_info);
4291
4292 if (r_type >= (unsigned int) R_ARM_max)
4293 {
4294 bfd_set_error (bfd_error_bad_value);
4295 error_ret_free_internal:
4296 if (elf_section_data (section)->relocs == NULL)
4297 free (internal_relocs);
4298 goto error_ret_free_local;
4299 }
4300
4301 /* Only look for stubs on branch instructions. */
4302 if ((r_type != (unsigned int) R_ARM_CALL)
4303 && (r_type != (unsigned int) R_ARM_THM_CALL)
4304 && (r_type != (unsigned int) R_ARM_JUMP24)
4305 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4306 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4307 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4308 && (r_type != (unsigned int) R_ARM_PLT32))
4309 continue;
4310
4311 /* Now determine the call target, its name, value,
4312 section. */
4313 sym_sec = NULL;
4314 sym_value = 0;
4315 destination = 0;
4316 hash = NULL;
4317 sym_name = NULL;
4318 if (r_indx < symtab_hdr->sh_info)
4319 {
4320 /* It's a local symbol. */
4321 Elf_Internal_Sym *sym;
4322 Elf_Internal_Shdr *hdr;
4323
4324 if (local_syms == NULL)
4325 {
4326 local_syms
4327 = (Elf_Internal_Sym *) symtab_hdr->contents;
4328 if (local_syms == NULL)
4329 local_syms
4330 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4331 symtab_hdr->sh_info, 0,
4332 NULL, NULL, NULL);
4333 if (local_syms == NULL)
4334 goto error_ret_free_internal;
4335 }
4336
4337 sym = local_syms + r_indx;
4338 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4339 sym_sec = hdr->bfd_section;
4340 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4341 sym_value = sym->st_value;
4342 destination = (sym_value + irela->r_addend
4343 + sym_sec->output_offset
4344 + sym_sec->output_section->vma);
4345 st_type = ELF_ST_TYPE (sym->st_info);
4346 sym_name
4347 = bfd_elf_string_from_elf_section (input_bfd,
4348 symtab_hdr->sh_link,
4349 sym->st_name);
4350 }
4351 else
4352 {
4353 /* It's an external symbol. */
4354 int e_indx;
4355
4356 e_indx = r_indx - symtab_hdr->sh_info;
4357 hash = ((struct elf32_arm_link_hash_entry *)
4358 elf_sym_hashes (input_bfd)[e_indx]);
4359
4360 while (hash->root.root.type == bfd_link_hash_indirect
4361 || hash->root.root.type == bfd_link_hash_warning)
4362 hash = ((struct elf32_arm_link_hash_entry *)
4363 hash->root.root.u.i.link);
4364
4365 if (hash->root.root.type == bfd_link_hash_defined
4366 || hash->root.root.type == bfd_link_hash_defweak)
4367 {
4368 sym_sec = hash->root.root.u.def.section;
4369 sym_value = hash->root.root.u.def.value;
4370 if (sym_sec->output_section != NULL)
4371 destination = (sym_value + irela->r_addend
4372 + sym_sec->output_offset
4373 + sym_sec->output_section->vma);
4374 }
4375 else if ((hash->root.root.type == bfd_link_hash_undefined)
4376 || (hash->root.root.type == bfd_link_hash_undefweak))
4377 {
4378 /* For a shared library, use the PLT stub as
4379 target address to decide whether a long
4380 branch stub is needed.
4381 			     For absolute code, such branches cannot be handled. */
4382 struct elf32_arm_link_hash_table *globals =
4383 elf32_arm_hash_table (info);
4384
4385 if (globals->splt != NULL && hash != NULL
4386 && hash->root.plt.offset != (bfd_vma) -1)
4387 {
4388 sym_sec = globals->splt;
4389 sym_value = hash->root.plt.offset;
4390 if (sym_sec->output_section != NULL)
4391 destination = (sym_value
4392 + sym_sec->output_offset
4393 + sym_sec->output_section->vma);
4394 }
4395 else
4396 continue;
4397 }
4398 else
4399 {
4400 bfd_set_error (bfd_error_bad_value);
4401 goto error_ret_free_internal;
4402 }
4403 st_type = ELF_ST_TYPE (hash->root.type);
4404 sym_name = hash->root.root.root.string;
4405 }
4406
4407 do
4408 {
4409 /* Determine what (if any) linker stub is needed. */
4410 stub_type = arm_type_of_stub (info, section, irela,
4411 st_type, hash,
4412 destination, sym_sec,
4413 input_bfd, sym_name);
4414 if (stub_type == arm_stub_none)
4415 break;
4416
4417 /* Support for grouping stub sections. */
4418 id_sec = htab->stub_group[section->id].link_sec;
4419
4420 /* Get the name of this stub. */
4421 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4422 irela);
4423 if (!stub_name)
4424 goto error_ret_free_internal;
4425
4426 /* We've either created a stub for this reloc already,
4427 or we are about to. */
4428 created_stub = TRUE;
4429
4430 stub_entry = arm_stub_hash_lookup
4431 (&htab->stub_hash_table, stub_name,
4432 FALSE, FALSE);
4433 if (stub_entry != NULL)
4434 {
4435 /* The proper stub has already been created. */
4436 free (stub_name);
4437 break;
4438 }
4439
4440 stub_entry = elf32_arm_add_stub (stub_name, section,
4441 htab);
4442 if (stub_entry == NULL)
4443 {
4444 free (stub_name);
4445 goto error_ret_free_internal;
4446 }
4447
4448 stub_entry->target_value = sym_value;
4449 stub_entry->target_section = sym_sec;
4450 stub_entry->stub_type = stub_type;
4451 stub_entry->h = hash;
4452 stub_entry->st_type = st_type;
4453
4454 if (sym_name == NULL)
4455 sym_name = "unnamed";
4456 stub_entry->output_name
4457 = bfd_alloc (htab->stub_bfd,
4458 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4459 + strlen (sym_name));
4460 if (stub_entry->output_name == NULL)
4461 {
4462 free (stub_name);
4463 goto error_ret_free_internal;
4464 }
4465
4466 /* For historical reasons, use the existing names for
4467 ARM-to-Thumb and Thumb-to-ARM stubs. */
4468 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4469 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4470 && st_type != STT_ARM_TFUNC)
4471 sprintf (stub_entry->output_name,
4472 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4473 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4474 || (r_type == (unsigned int) R_ARM_JUMP24))
4475 && st_type == STT_ARM_TFUNC)
4476 sprintf (stub_entry->output_name,
4477 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4478 else
4479 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4480 sym_name);
4481
4482 stub_changed = TRUE;
4483 }
4484 while (0);
4485
4486 /* Look for relocations which might trigger Cortex-A8
4487 erratum. */
4488 if (htab->fix_cortex_a8
4489 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4490 || r_type == (unsigned int) R_ARM_THM_JUMP19
4491 || r_type == (unsigned int) R_ARM_THM_CALL
4492 || r_type == (unsigned int) R_ARM_THM_XPC22))
4493 {
4494 bfd_vma from = section->output_section->vma
4495 + section->output_offset
4496 + irela->r_offset;
4497
4498 if ((from & 0xfff) == 0xffe)
4499 {
4500 /* Found a candidate. Note we haven't checked the
4501 destination is within 4K here: if we do so (and
4502 don't create an entry in a8_relocs) we can't tell
4503 that a branch should have been relocated when
4504 scanning later. */
4505 if (num_a8_relocs == a8_reloc_table_size)
4506 {
4507 a8_reloc_table_size *= 2;
4508 a8_relocs = bfd_realloc (a8_relocs,
4509 sizeof (struct a8_erratum_reloc)
4510 * a8_reloc_table_size);
4511 }
4512
4513 a8_relocs[num_a8_relocs].from = from;
4514 a8_relocs[num_a8_relocs].destination = destination;
4515 a8_relocs[num_a8_relocs].r_type = r_type;
4516 a8_relocs[num_a8_relocs].st_type = st_type;
4517 a8_relocs[num_a8_relocs].sym_name = sym_name;
4518 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4519
4520 num_a8_relocs++;
4521 }
4522 }
4523 }
4524
4525 /* We're done with the internal relocs, free them. */
4526 if (elf_section_data (section)->relocs == NULL)
4527 free (internal_relocs);
4528 }
4529
4530 if (htab->fix_cortex_a8)
4531 {
4532 /* Sort relocs which might apply to Cortex-A8 erratum. */
4533 qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
4534 &a8_reloc_compare);
4535
4536 /* Scan for branches which might trigger Cortex-A8 erratum. */
4537 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4538 &num_a8_fixes, &a8_fix_table_size,
4539 a8_relocs, num_a8_relocs) != 0)
4540 goto error_ret_free_local;
4541 }
4542 }
4543
4544 if (htab->fix_cortex_a8 && num_a8_fixes != prev_num_a8_fixes)
4545 stub_changed = TRUE;
4546
4547 if (!stub_changed)
4548 break;
4549
4550 /* OK, we've added some stubs. Find out the new size of the
4551 stub sections. */
4552 for (stub_sec = htab->stub_bfd->sections;
4553 stub_sec != NULL;
4554 stub_sec = stub_sec->next)
4555 {
4556 /* Ignore non-stub sections. */
4557 if (!strstr (stub_sec->name, STUB_SUFFIX))
4558 continue;
4559
4560 stub_sec->size = 0;
4561 }
4562
4563 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4564
4565 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4566 if (htab->fix_cortex_a8)
4567 for (i = 0; i < num_a8_fixes; i++)
4568 {
4569 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4570 a8_fixes[i].section, htab);
4571
4572 if (stub_sec == NULL)
4573 goto error_ret_free_local;
4574
4575 stub_sec->size
4576 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4577 NULL);
4578 }
4579
4580
4581 /* Ask the linker to do its stuff. */
4582 (*htab->layout_sections_again) ();
4583 stub_changed = FALSE;
4584 prev_num_a8_fixes = num_a8_fixes;
4585 }
4586
4587 /* Add stubs for Cortex-A8 erratum fixes now. */
4588 if (htab->fix_cortex_a8)
4589 {
4590 for (i = 0; i < num_a8_fixes; i++)
4591 {
4592 struct elf32_arm_stub_hash_entry *stub_entry;
4593 char *stub_name = a8_fixes[i].stub_name;
4594 asection *section = a8_fixes[i].section;
4595 unsigned int section_id = a8_fixes[i].section->id;
4596 asection *link_sec = htab->stub_group[section_id].link_sec;
4597 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4598 const insn_sequence *template;
4599 int template_size, size = 0;
4600
4601 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4602 TRUE, FALSE);
4603 if (stub_entry == NULL)
4604 {
4605 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4606 section->owner,
4607 stub_name);
4608 return FALSE;
4609 }
4610
4611 stub_entry->stub_sec = stub_sec;
4612 stub_entry->stub_offset = 0;
4613 stub_entry->id_sec = link_sec;
4614 stub_entry->stub_type = a8_fixes[i].stub_type;
4615 stub_entry->target_section = a8_fixes[i].section;
4616 stub_entry->target_value = a8_fixes[i].offset;
4617 stub_entry->target_addend = a8_fixes[i].addend;
4618 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4619 stub_entry->st_type = STT_ARM_TFUNC;
4620
4621 size = find_stub_size_and_template (a8_fixes[i].stub_type, &template,
4622 &template_size);
4623
4624 stub_entry->stub_size = size;
4625 stub_entry->stub_template = template;
4626 stub_entry->stub_template_size = template_size;
4627 }
4628
4629 /* Stash the Cortex-A8 erratum fix array for use later in
4630 elf32_arm_write_section(). */
4631 htab->a8_erratum_fixes = a8_fixes;
4632 htab->num_a8_erratum_fixes = num_a8_fixes;
4633 }
4634 else
4635 {
4636 htab->a8_erratum_fixes = NULL;
4637 htab->num_a8_erratum_fixes = 0;
4638 }
4639 return TRUE;
4640
4641 error_ret_free_local:
4642 return FALSE;
4643 }
4644
4645 /* Build all the stubs associated with the current output file. The
4646 stubs are kept in a hash table attached to the main linker hash
4647 table. We also set up the .plt entries for statically linked PIC
4648 functions here. This function is called via arm_elf_finish in the
4649 linker. */
4650
4651 bfd_boolean
4652 elf32_arm_build_stubs (struct bfd_link_info *info)
4653 {
4654 asection *stub_sec;
4655 struct bfd_hash_table *table;
4656 struct elf32_arm_link_hash_table *htab;
4657
4658 htab = elf32_arm_hash_table (info);
4659
4660 for (stub_sec = htab->stub_bfd->sections;
4661 stub_sec != NULL;
4662 stub_sec = stub_sec->next)
4663 {
4664 bfd_size_type size;
4665
4666 /* Ignore non-stub sections. */
4667 if (!strstr (stub_sec->name, STUB_SUFFIX))
4668 continue;
4669
4670 /* Allocate memory to hold the linker stubs. */
4671 size = stub_sec->size;
4672 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
4673 if (stub_sec->contents == NULL && size != 0)
4674 return FALSE;
4675 stub_sec->size = 0;
4676 }
4677
4678 /* Build the stubs as directed by the stub hash table. */
4679 table = &htab->stub_hash_table;
4680 bfd_hash_traverse (table, arm_build_one_stub, info);
4681
4682 return TRUE;
4683 }
4684
4685 /* Locate the Thumb encoded calling stub for NAME. */
4686
4687 static struct elf_link_hash_entry *
4688 find_thumb_glue (struct bfd_link_info *link_info,
4689 const char *name,
4690 char **error_message)
4691 {
4692 char *tmp_name;
4693 struct elf_link_hash_entry *hash;
4694 struct elf32_arm_link_hash_table *hash_table;
4695
4696 /* We need a pointer to the armelf specific hash table. */
4697 hash_table = elf32_arm_hash_table (link_info);
4698
4699 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4700 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4701
4702 BFD_ASSERT (tmp_name);
4703
4704 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4705
4706 hash = elf_link_hash_lookup
4707 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4708
4709 if (hash == NULL
4710 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4711 tmp_name, name) == -1)
4712 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4713
4714 free (tmp_name);
4715
4716 return hash;
4717 }
4718
4719 /* Locate the ARM encoded calling stub for NAME. */
4720
4721 static struct elf_link_hash_entry *
4722 find_arm_glue (struct bfd_link_info *link_info,
4723 const char *name,
4724 char **error_message)
4725 {
4726 char *tmp_name;
4727 struct elf_link_hash_entry *myh;
4728 struct elf32_arm_link_hash_table *hash_table;
4729
4730 /* We need a pointer to the elfarm specific hash table. */
4731 hash_table = elf32_arm_hash_table (link_info);
4732
4733 tmp_name = bfd_malloc ((bfd_size_type) strlen (name)
4734 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4735
4736 BFD_ASSERT (tmp_name);
4737
4738 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4739
4740 myh = elf_link_hash_lookup
4741 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4742
4743 if (myh == NULL
4744 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4745 tmp_name, name) == -1)
4746 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4747
4748 free (tmp_name);
4749
4750 return myh;
4751 }
4752
4753 /* ARM->Thumb glue (static images):
4754
4755 .arm
4756 __func_from_arm:
4757 ldr r12, __func_addr
4758 bx r12
4759 __func_addr:
4760 .word func @ behave as if you saw an ARM_32 reloc.
4761
4762 (v5t static images)
4763 .arm
4764 __func_from_arm:
4765 ldr pc, __func_addr
4766 __func_addr:
4767 .word func @ behave as if you saw an ARM_32 reloc.
4768
4769 (relocatable images)
4770 .arm
4771 __func_from_arm:
4772 ldr r12, __func_offset
4773 add r12, r12, pc
4774 bx r12
4775 __func_offset:
4776 .word func - . */
4777
4778 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4779 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4780 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4781 static const insn32 a2t3_func_addr_insn = 0x00000001;
4782
4783 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4784 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4785 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4786
4787 #define ARM2THUMB_PIC_GLUE_SIZE 16
4788 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4789 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4790 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4791
4792 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4793
4794 .thumb .thumb
4795 .align 2 .align 2
4796 __func_from_thumb: __func_from_thumb:
4797 bx pc push {r6, lr}
4798 nop ldr r6, __func_addr
4799 .arm mov lr, pc
4800 b func bx r6
4801 .arm
4802 ;; back_to_thumb
4803 ldmia r13! {r6, lr}
4804 bx lr
4805 __func_addr:
4806 .word func */
4807
4808 #define THUMB2ARM_GLUE_SIZE 8
4809 static const insn16 t2a1_bx_pc_insn = 0x4778;
4810 static const insn16 t2a2_noop_insn = 0x46c0;
4811 static const insn32 t2a3_b_insn = 0xea000000;
4812
4813 #define VFP11_ERRATUM_VENEER_SIZE 8
4814
4815 #define ARM_BX_VENEER_SIZE 12
4816 static const insn32 armbx1_tst_insn = 0xe3100001;
4817 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4818 static const insn32 armbx3_bx_insn = 0xe12fff10;
4819
4820 #ifndef ELFARM_NABI_C_INCLUDED
4821 static void
4822 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4823 {
4824 asection * s;
4825 bfd_byte * contents;
4826
4827 if (size == 0)
4828 {
4829 /* Do not include empty glue sections in the output. */
4830 if (abfd != NULL)
4831 {
4832 s = bfd_get_section_by_name (abfd, name);
4833 if (s != NULL)
4834 s->flags |= SEC_EXCLUDE;
4835 }
4836 return;
4837 }
4838
4839 BFD_ASSERT (abfd != NULL);
4840
4841 s = bfd_get_section_by_name (abfd, name);
4842 BFD_ASSERT (s != NULL);
4843
4844 contents = bfd_alloc (abfd, size);
4845
4846 BFD_ASSERT (s->size == size);
4847 s->contents = contents;
4848 }
4849
4850 bfd_boolean
4851 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4852 {
4853 struct elf32_arm_link_hash_table * globals;
4854
4855 globals = elf32_arm_hash_table (info);
4856 BFD_ASSERT (globals != NULL);
4857
4858 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4859 globals->arm_glue_size,
4860 ARM2THUMB_GLUE_SECTION_NAME);
4861
4862 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4863 globals->thumb_glue_size,
4864 THUMB2ARM_GLUE_SECTION_NAME);
4865
4866 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4867 globals->vfp11_erratum_glue_size,
4868 VFP11_ERRATUM_VENEER_SECTION_NAME);
4869
4870 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4871 globals->bx_glue_size,
4872 ARM_BX_GLUE_SECTION_NAME);
4873
4874 return TRUE;
4875 }
4876
4877 /* Allocate space and symbols for calling a Thumb function from ARM mode.
4878    Returns the symbol identifying the stub. */
4879
4880 static struct elf_link_hash_entry *
4881 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
4882 struct elf_link_hash_entry * h)
4883 {
4884 const char * name = h->root.root.string;
4885 asection * s;
4886 char * tmp_name;
4887 struct elf_link_hash_entry * myh;
4888 struct bfd_link_hash_entry * bh;
4889 struct elf32_arm_link_hash_table * globals;
4890 bfd_vma val;
4891 bfd_size_type size;
4892
4893 globals = elf32_arm_hash_table (link_info);
4894
4895 BFD_ASSERT (globals != NULL);
4896 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4897
4898 s = bfd_get_section_by_name
4899 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
4900
4901 BFD_ASSERT (s != NULL);
4902
4903 tmp_name = bfd_malloc ((bfd_size_type) strlen (name) + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4904
4905 BFD_ASSERT (tmp_name);
4906
4907 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4908
4909 myh = elf_link_hash_lookup
4910 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
4911
4912 if (myh != NULL)
4913 {
4914 /* We've already seen this guy. */
4915 free (tmp_name);
4916 return myh;
4917 }
4918
4919   /* The only trick here is using globals->arm_glue_size as the value.
4920 Even though the section isn't allocated yet, this is where we will be
4921 putting it. The +1 on the value marks that the stub has not been
4922 output yet - not that it is a Thumb function. */
4923 bh = NULL;
4924 val = globals->arm_glue_size + 1;
4925 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4926 tmp_name, BSF_GLOBAL, s, val,
4927 NULL, TRUE, FALSE, &bh);
4928
4929 myh = (struct elf_link_hash_entry *) bh;
4930 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4931 myh->forced_local = 1;
4932
4933 free (tmp_name);
4934
4935 if (link_info->shared || globals->root.is_relocatable_executable
4936 || globals->pic_veneer)
4937 size = ARM2THUMB_PIC_GLUE_SIZE;
4938 else if (globals->use_blx)
4939 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
4940 else
4941 size = ARM2THUMB_STATIC_GLUE_SIZE;
4942
4943 s->size += size;
4944 globals->arm_glue_size += size;
4945
4946 return myh;
4947 }
4948
4949 /* Allocate space for ARMv4 BX veneers. */
4950
4951 static void
4952 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
4953 {
4954 asection * s;
4955 struct elf32_arm_link_hash_table *globals;
4956 char *tmp_name;
4957 struct elf_link_hash_entry *myh;
4958 struct bfd_link_hash_entry *bh;
4959 bfd_vma val;
4960
4961 /* BX PC does not need a veneer. */
4962 if (reg == 15)
4963 return;
4964
4965 globals = elf32_arm_hash_table (link_info);
4966
4967 BFD_ASSERT (globals != NULL);
4968 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
4969
4970 /* Check if this veneer has already been allocated. */
4971 if (globals->bx_glue_offset[reg])
4972 return;
4973
4974 s = bfd_get_section_by_name
4975 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
4976
4977 BFD_ASSERT (s != NULL);
4978
4979 /* Add symbol for veneer. */
4980 tmp_name = bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
4981
4982 BFD_ASSERT (tmp_name);
4983
4984 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
4985
4986 myh = elf_link_hash_lookup
4987 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
4988
4989 BFD_ASSERT (myh == NULL);
4990
4991 bh = NULL;
4992 val = globals->bx_glue_size;
4993 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
4994 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
4995 NULL, TRUE, FALSE, &bh);
4996
4997 myh = (struct elf_link_hash_entry *) bh;
4998 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
4999 myh->forced_local = 1;
5000
5001 s->size += ARM_BX_VENEER_SIZE;
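       /* ARM_BX_VENEER_SIZE is a multiple of four, so the low bits of the
          offset are spare; setting bit 1 also keeps the entry nonzero for the
          veneer at offset zero, which is what the check above relies on to
          mean "already allocated".  */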
5002 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5003 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5004 }
5005
5006
5007 /* Add an entry to the code/data map for section SEC. */
5008
5009 static void
5010 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5011 {
5012 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5013 unsigned int newidx;
5014
5015 if (sec_data->map == NULL)
5016 {
5017 sec_data->map = bfd_malloc (sizeof (elf32_arm_section_map));
5018 sec_data->mapcount = 0;
5019 sec_data->mapsize = 1;
5020 }
5021
5022 newidx = sec_data->mapcount++;
5023
5024 if (sec_data->mapcount > sec_data->mapsize)
5025 {
5026 sec_data->mapsize *= 2;
5027 sec_data->map = bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5028 * sizeof (elf32_arm_section_map));
5029 }
5030
5031 if (sec_data->map)
5032 {
5033 sec_data->map[newidx].vma = vma;
5034 sec_data->map[newidx].type = type;
5035 }
5036 }
5037
5038
5039 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5040 veneers are handled for now. */
5041
5042 static bfd_vma
5043 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5044 elf32_vfp11_erratum_list *branch,
5045 bfd *branch_bfd,
5046 asection *branch_sec,
5047 unsigned int offset)
5048 {
5049 asection *s;
5050 struct elf32_arm_link_hash_table *hash_table;
5051 char *tmp_name;
5052 struct elf_link_hash_entry *myh;
5053 struct bfd_link_hash_entry *bh;
5054 bfd_vma val;
5055 struct _arm_elf_section_data *sec_data;
5056 int errcount;
5057 elf32_vfp11_erratum_list *newerr;
5058
5059 hash_table = elf32_arm_hash_table (link_info);
5060
5061 BFD_ASSERT (hash_table != NULL);
5062 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5063
5064 s = bfd_get_section_by_name
5065 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5066
5067   BFD_ASSERT (s != NULL);
5068 
5069   sec_data = elf32_arm_section_data (s);
5070
5071 tmp_name = bfd_malloc ((bfd_size_type) strlen
5072 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5073
5074 BFD_ASSERT (tmp_name);
5075
5076 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5077 hash_table->num_vfp11_fixes);
5078
5079 myh = elf_link_hash_lookup
5080 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5081
5082 BFD_ASSERT (myh == NULL);
5083
5084 bh = NULL;
5085 val = hash_table->vfp11_erratum_glue_size;
5086 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5087 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5088 NULL, TRUE, FALSE, &bh);
5089
5090 myh = (struct elf_link_hash_entry *) bh;
5091 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5092 myh->forced_local = 1;
5093
5094 /* Link veneer back to calling location. */
5095 errcount = ++(sec_data->erratumcount);
5096 newerr = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5097
5098 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5099 newerr->vma = -1;
5100 newerr->u.v.branch = branch;
5101 newerr->u.v.id = hash_table->num_vfp11_fixes;
5102 branch->u.b.veneer = newerr;
5103
5104 newerr->next = sec_data->erratumlist;
5105 sec_data->erratumlist = newerr;
5106
5107 /* A symbol for the return from the veneer. */
5108 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5109 hash_table->num_vfp11_fixes);
5110
5111 myh = elf_link_hash_lookup
5112 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5113
5114 if (myh != NULL)
5115 abort ();
5116
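       /* The return point is the instruction following the one at OFFSET;
          ARM instructions are four bytes, hence offset + 4.  */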
5117 bh = NULL;
5118 val = offset + 4;
5119 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5120 branch_sec, val, NULL, TRUE, FALSE, &bh);
5121
5122 myh = (struct elf_link_hash_entry *) bh;
5123 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5124 myh->forced_local = 1;
5125
5126 free (tmp_name);
5127
5128 /* Generate a mapping symbol for the veneer section, and explicitly add an
5129 entry for that symbol to the code/data map for the section. */
5130 if (hash_table->vfp11_erratum_glue_size == 0)
5131 {
5132 bh = NULL;
5133 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5134 ever requires this erratum fix. */
5135 _bfd_generic_link_add_one_symbol (link_info,
5136 hash_table->bfd_of_glue_owner, "$a",
5137 BSF_LOCAL, s, 0, NULL,
5138 TRUE, FALSE, &bh);
5139
5140 myh = (struct elf_link_hash_entry *) bh;
5141 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5142 myh->forced_local = 1;
5143
5144 /* The elf32_arm_init_maps function only cares about symbols from input
5145 BFDs. We must make a note of this generated mapping symbol
5146 ourselves so that code byteswapping works properly in
5147 elf32_arm_write_section. */
5148 elf32_arm_section_map_add (s, 'a', 0);
5149 }
5150
5151 s->size += VFP11_ERRATUM_VENEER_SIZE;
5152 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5153 hash_table->num_vfp11_fixes++;
5154
5155 /* The offset of the veneer. */
5156 return val;
5157 }
5158
5159 #define ARM_GLUE_SECTION_FLAGS \
5160 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5161 | SEC_READONLY | SEC_LINKER_CREATED)
5162
5163 /* Create a fake section for use by the ARM backend of the linker. */
5164
5165 static bfd_boolean
5166 arm_make_glue_section (bfd * abfd, const char * name)
5167 {
5168 asection * sec;
5169
5170 sec = bfd_get_section_by_name (abfd, name);
5171 if (sec != NULL)
5172 /* Already made. */
5173 return TRUE;
5174
5175 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5176
5177 if (sec == NULL
5178 || !bfd_set_section_alignment (abfd, sec, 2))
5179 return FALSE;
5180
5181 /* Set the gc mark to prevent the section from being removed by garbage
5182 collection, despite the fact that no relocs refer to this section. */
5183 sec->gc_mark = 1;
5184
5185 return TRUE;
5186 }
5187
5188 /* Add the glue sections to ABFD. This function is called from the
5189 linker scripts in ld/emultempl/{armelf}.em. */
5190
5191 bfd_boolean
5192 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5193 struct bfd_link_info *info)
5194 {
5195 /* If we are only performing a partial
5196 link do not bother adding the glue. */
5197 if (info->relocatable)
5198 return TRUE;
5199
5200 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5201 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5202 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5203 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5204 }
5205
5206 /* Select a BFD to be used to hold the sections used by the glue code.
5207 This function is called from the linker scripts in ld/emultempl/
5208 {armelf/pe}.em. */
5209
5210 bfd_boolean
5211 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5212 {
5213 struct elf32_arm_link_hash_table *globals;
5214
5215 /* If we are only performing a partial link
5216 do not bother getting a bfd to hold the glue. */
5217 if (info->relocatable)
5218 return TRUE;
5219
5220 /* Make sure we don't attach the glue sections to a dynamic object. */
5221 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5222
5223 globals = elf32_arm_hash_table (info);
5224
5225 BFD_ASSERT (globals != NULL);
5226
5227 if (globals->bfd_of_glue_owner != NULL)
5228 return TRUE;
5229
5230 /* Save the bfd for later use. */
5231 globals->bfd_of_glue_owner = abfd;
5232
5233 return TRUE;
5234 }
5235
5236 static void
5237 check_use_blx (struct elf32_arm_link_hash_table *globals)
5238 {
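       /* Tag_CPU_arch values above 2 (ARMv4T) denote ARMv5T and later,
          which provide the BLX instruction.  */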
5239 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5240 Tag_CPU_arch) > 2)
5241 globals->use_blx = 1;
5242 }
5243
5244 bfd_boolean
5245 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5246 struct bfd_link_info *link_info)
5247 {
5248 Elf_Internal_Shdr *symtab_hdr;
5249 Elf_Internal_Rela *internal_relocs = NULL;
5250 Elf_Internal_Rela *irel, *irelend;
5251 bfd_byte *contents = NULL;
5252
5253 asection *sec;
5254 struct elf32_arm_link_hash_table *globals;
5255
5256 /* If we are only performing a partial link do not bother
5257 to construct any glue. */
5258 if (link_info->relocatable)
5259 return TRUE;
5260
5261   /* Here we have a bfd that is to be included in the link.  We have a
5262 hook to do reloc rummaging, before section sizes are nailed down. */
5263 globals = elf32_arm_hash_table (link_info);
5264
5265 BFD_ASSERT (globals != NULL);
5266
5267 check_use_blx (globals);
5268
5269 if (globals->byteswap_code && !bfd_big_endian (abfd))
5270 {
5271 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5272 abfd);
5273 return FALSE;
5274 }
5275
5276 /* PR 5398: If we have not decided to include any loadable sections in
5277 the output then we will not have a glue owner bfd. This is OK, it
5278 just means that there is nothing else for us to do here. */
5279 if (globals->bfd_of_glue_owner == NULL)
5280 return TRUE;
5281
5282 /* Rummage around all the relocs and map the glue vectors. */
5283 sec = abfd->sections;
5284
5285 if (sec == NULL)
5286 return TRUE;
5287
5288 for (; sec != NULL; sec = sec->next)
5289 {
5290 if (sec->reloc_count == 0)
5291 continue;
5292
5293 if ((sec->flags & SEC_EXCLUDE) != 0)
5294 continue;
5295
5296 symtab_hdr = & elf_symtab_hdr (abfd);
5297
5298 /* Load the relocs. */
5299 internal_relocs
5300 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5301
5302 if (internal_relocs == NULL)
5303 goto error_return;
5304
5305 irelend = internal_relocs + sec->reloc_count;
5306 for (irel = internal_relocs; irel < irelend; irel++)
5307 {
5308 long r_type;
5309 unsigned long r_index;
5310
5311 struct elf_link_hash_entry *h;
5312
5313 r_type = ELF32_R_TYPE (irel->r_info);
5314 r_index = ELF32_R_SYM (irel->r_info);
5315
5316 /* These are the only relocation types we care about. */
5317 if ( r_type != R_ARM_PC24
5318 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5319 continue;
5320
5321 /* Get the section contents if we haven't done so already. */
5322 if (contents == NULL)
5323 {
5324 /* Get cached copy if it exists. */
5325 if (elf_section_data (sec)->this_hdr.contents != NULL)
5326 contents = elf_section_data (sec)->this_hdr.contents;
5327 else
5328 {
5329 /* Go get them off disk. */
5330 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5331 goto error_return;
5332 }
5333 }
5334
5335 if (r_type == R_ARM_V4BX)
5336 {
5337 int reg;
5338
5339 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5340 record_arm_bx_glue (link_info, reg);
5341 continue;
5342 }
5343
5344 /* If the relocation is not against a symbol it cannot concern us. */
5345 h = NULL;
5346
5347 /* We don't care about local symbols. */
5348 if (r_index < symtab_hdr->sh_info)
5349 continue;
5350
5351 /* This is an external symbol. */
5352 r_index -= symtab_hdr->sh_info;
5353 h = (struct elf_link_hash_entry *)
5354 elf_sym_hashes (abfd)[r_index];
5355
5356 /* If the relocation is against a static symbol it must be within
5357 the current section and so cannot be a cross ARM/Thumb relocation. */
5358 if (h == NULL)
5359 continue;
5360
5361 /* If the call will go through a PLT entry then we do not need
5362 glue. */
5363 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5364 continue;
5365
5366 switch (r_type)
5367 {
5368 case R_ARM_PC24:
5369 /* This one is a call from arm code. We need to look up
5370 the target of the call. If it is a thumb target, we
5371 insert glue. */
5372 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5373 record_arm_to_thumb_glue (link_info, h);
5374 break;
5375
5376 default:
5377 abort ();
5378 }
5379 }
5380
5381 if (contents != NULL
5382 && elf_section_data (sec)->this_hdr.contents != contents)
5383 free (contents);
5384 contents = NULL;
5385
5386 if (internal_relocs != NULL
5387 && elf_section_data (sec)->relocs != internal_relocs)
5388 free (internal_relocs);
5389 internal_relocs = NULL;
5390 }
5391
5392 return TRUE;
5393
5394 error_return:
5395 if (contents != NULL
5396 && elf_section_data (sec)->this_hdr.contents != contents)
5397 free (contents);
5398 if (internal_relocs != NULL
5399 && elf_section_data (sec)->relocs != internal_relocs)
5400 free (internal_relocs);
5401
5402 return FALSE;
5403 }
5404 #endif
5405
5406
5407 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5408
5409 void
5410 bfd_elf32_arm_init_maps (bfd *abfd)
5411 {
5412 Elf_Internal_Sym *isymbuf;
5413 Elf_Internal_Shdr *hdr;
5414 unsigned int i, localsyms;
5415
5416 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5417 if (! is_arm_elf (abfd))
5418 return;
5419
5420 if ((abfd->flags & DYNAMIC) != 0)
5421 return;
5422
5423 hdr = & elf_symtab_hdr (abfd);
5424 localsyms = hdr->sh_info;
5425
5426 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5427 should contain the number of local symbols, which should come before any
5428 global symbols. Mapping symbols are always local. */
5429 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5430 NULL);
5431
5432 /* No internal symbols read? Skip this BFD. */
5433 if (isymbuf == NULL)
5434 return;
5435
5436 for (i = 0; i < localsyms; i++)
5437 {
5438 Elf_Internal_Sym *isym = &isymbuf[i];
5439 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5440 const char *name;
5441
5442 if (sec != NULL
5443 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5444 {
5445 name = bfd_elf_string_from_elf_section (abfd,
5446 hdr->sh_link, isym->st_name);
5447
5448 if (bfd_is_arm_special_symbol_name (name,
5449 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5450 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5451 }
5452 }
5453 }
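/* Editor's note (added comment, not in the original source): the mapping
   symbols matched above are the ARM ELF special symbols $a (start of ARM
   code), $t (start of Thumb code) and $d (start of data), so name[1] is
   the single character 'a', 't' or 'd' that is recorded together with the
   symbol's value in the section map.  */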
5454
5455
5456 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5457 say what they wanted. */
5458
5459 void
5460 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5461 {
5462 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5463 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5464
5465 if (globals->fix_cortex_a8 == -1)
5466 {
5467 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5468 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5469 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5470 || out_attr[Tag_CPU_arch_profile].i == 0))
5471 globals->fix_cortex_a8 = 1;
5472 else
5473 globals->fix_cortex_a8 = 0;
5474 }
5475 }
5476
5477
5478 void
5479 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5480 {
5481 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5482 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5483
5484 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5485 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5486 {
5487 switch (globals->vfp11_fix)
5488 {
5489 case BFD_ARM_VFP11_FIX_DEFAULT:
5490 case BFD_ARM_VFP11_FIX_NONE:
5491 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5492 break;
5493
5494 default:
5495 /* Give a warning, but do as the user requests anyway. */
5496 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5497 "workaround is not necessary for target architecture"), obfd);
5498 }
5499 }
5500 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5501 /* For earlier architectures, we might need the workaround, but do not
5502 enable it by default. If users are running with broken hardware, they
5503 must enable the erratum fix explicitly. */
5504 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5505 }
5506
5507
5508 enum bfd_arm_vfp11_pipe
5509 {
5510 VFP11_FMAC,
5511 VFP11_LS,
5512 VFP11_DS,
5513 VFP11_BAD
5514 };
5515
5516 /* Return a VFP register number. This is encoded as RX:X for single-precision
5517 registers, or X:RX for double-precision registers, where RX is the group of
5518 four bits in the instruction encoding and X is the single extension bit.
5519 RX and X fields are specified using their lowest (starting) bit. The return
5520 value is:
5521
5522 0...31: single-precision registers s0...s31
5523 32...63: double-precision registers d0...d31.
5524
5525 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5526 encounter VFP3 instructions, so we allow the full range for DP registers. */
5527
5528 static unsigned int
5529 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5530 unsigned int x)
5531 {
5532 if (is_double)
5533 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5534 else
5535 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
5536 }
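/* Editor's note: the block below is an illustrative sketch added by the
   editor, not part of the original file, and is compiled only if the
   hypothetical macro VFP11_REGNO_EXAMPLE is defined.  It exercises the
   numbering scheme documented above using the Fm field positions
   (rx == 0, x == 5) that appear in the callers further down.  */
#ifdef VFP11_REGNO_EXAMPLE
#include <assert.h>

static void
vfp11_regno_example (void)
{
  /* Fabricated instruction word: the RX field (bits 3:0) holds 0x3 and
     the X bit (bit 5) is set.  */
  unsigned int insn = (0x3 << 0) | (1 << 5);

  /* Double precision: RX gives the low four bits, X the fifth bit,
     so this names d19 and is returned as 19 + 32 == 51.  */
  assert (bfd_arm_vfp11_regno (insn, TRUE, 0, 5) == 51);

  /* Single precision: RX:X == 0b0011:1 names s7.  */
  assert (bfd_arm_vfp11_regno (insn, FALSE, 0, 5) == 7);
}
#endif /* VFP11_REGNO_EXAMPLE */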
5537
5538 /* Set bits in *WMASK according to a register number REG as encoded by
5539 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5540
5541 static void
5542 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5543 {
5544 if (reg < 32)
5545 *wmask |= 1 << reg;
5546 else if (reg < 48)
5547 *wmask |= 3 << ((reg - 32) * 2);
5548 }
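/* Editor's note (added comment, not in the original source): the mask
   uses one bit per single-precision register, so a double-precision
   register dN (encoded above as 32 + N) sets the bits of its two aliased
   SP halves, s(2N) and s(2N+1).  For example, d3 arrives here as
   reg == 35 and performs *wmask |= 0xc0, i.e. bits 6 and 7 for s6/s7.  */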
5549
5550 /* Return TRUE if WMASK overwrites anything in REGS. */
5551
5552 static bfd_boolean
5553 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5554 {
5555 int i;
5556
5557 for (i = 0; i < numregs; i++)
5558 {
5559 unsigned int reg = regs[i];
5560
5561 if (reg < 32 && (wmask & (1 << reg)) != 0)
5562 return TRUE;
5563
5564 reg -= 32;
5565
5566 if (reg >= 16)
5567 continue;
5568
5569 if ((wmask & (3 << (reg * 2))) != 0)
5570 return TRUE;
5571 }
5572
5573 return FALSE;
5574 }
5575
5576 /* In this function, we're interested in two things: finding input registers
5577 for VFP data-processing instructions, and finding the set of registers which
5578 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5579 hold the written set, so FLDM etc. are easy to deal with (we're only
5580 interested in 32 SP registers or 16 DP registers, due to the VFP version
5581 implemented by the chip in question). DP registers are marked by setting
5582 both SP registers in the write mask. */
5583
5584 static enum bfd_arm_vfp11_pipe
5585 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5586 int *numregs)
5587 {
5588 enum bfd_arm_vfp11_pipe pipe = VFP11_BAD;
5589 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5590
5591 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5592 {
5593 unsigned int pqrs;
5594 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5595 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5596
5597 pqrs = ((insn & 0x00800000) >> 20)
5598 | ((insn & 0x00300000) >> 19)
5599 | ((insn & 0x00000040) >> 6);
5600
5601 switch (pqrs)
5602 {
5603 case 0: /* fmac[sd]. */
5604 case 1: /* fnmac[sd]. */
5605 case 2: /* fmsc[sd]. */
5606 case 3: /* fnmsc[sd]. */
5607 pipe = VFP11_FMAC;
5608 bfd_arm_vfp11_write_mask (destmask, fd);
5609 regs[0] = fd;
5610 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5611 regs[2] = fm;
5612 *numregs = 3;
5613 break;
5614
5615 case 4: /* fmul[sd]. */
5616 case 5: /* fnmul[sd]. */
5617 case 6: /* fadd[sd]. */
5618 case 7: /* fsub[sd]. */
5619 pipe = VFP11_FMAC;
5620 goto vfp_binop;
5621
5622 case 8: /* fdiv[sd]. */
5623 pipe = VFP11_DS;
5624 vfp_binop:
5625 bfd_arm_vfp11_write_mask (destmask, fd);
5626 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5627 regs[1] = fm;
5628 *numregs = 2;
5629 break;
5630
5631 case 15: /* extended opcode. */
5632 {
5633 unsigned int extn = ((insn >> 15) & 0x1e)
5634 | ((insn >> 7) & 1);
5635
5636 switch (extn)
5637 {
5638 case 0: /* fcpy[sd]. */
5639 case 1: /* fabs[sd]. */
5640 case 2: /* fneg[sd]. */
5641 case 8: /* fcmp[sd]. */
5642 case 9: /* fcmpe[sd]. */
5643 case 10: /* fcmpz[sd]. */
5644 case 11: /* fcmpez[sd]. */
5645 case 16: /* fuito[sd]. */
5646 case 17: /* fsito[sd]. */
5647 case 24: /* ftoui[sd]. */
5648 case 25: /* ftouiz[sd]. */
5649 case 26: /* ftosi[sd]. */
5650 case 27: /* ftosiz[sd]. */
5651 /* These instructions will not bounce due to underflow. */
5652 *numregs = 0;
5653 pipe = VFP11_FMAC;
5654 break;
5655
5656 case 3: /* fsqrt[sd]. */
5657 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5658 registers to cause the erratum in previous instructions. */
5659 bfd_arm_vfp11_write_mask (destmask, fd);
5660 pipe = VFP11_DS;
5661 break;
5662
5663 case 15: /* fcvt{ds,sd}. */
5664 {
5665 int rnum = 0;
5666
5667 bfd_arm_vfp11_write_mask (destmask, fd);
5668
5669 /* Only FCVTSD can underflow. */
5670 if ((insn & 0x100) != 0)
5671 regs[rnum++] = fm;
5672
5673 *numregs = rnum;
5674
5675 pipe = VFP11_FMAC;
5676 }
5677 break;
5678
5679 default:
5680 return VFP11_BAD;
5681 }
5682 }
5683 break;
5684
5685 default:
5686 return VFP11_BAD;
5687 }
5688 }
5689 /* Two-register transfer. */
5690 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5691 {
5692 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5693
5694 if ((insn & 0x100000) == 0)
5695 {
5696 if (is_double)
5697 bfd_arm_vfp11_write_mask (destmask, fm);
5698 else
5699 {
5700 bfd_arm_vfp11_write_mask (destmask, fm);
5701 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5702 }
5703 }
5704
5705 pipe = VFP11_LS;
5706 }
5707 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5708 {
5709 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5710 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5711
5712 switch (puw)
5713 {
5714 case 0: /* Two-reg transfer. We should catch these above. */
5715 abort ();
5716
5717 case 2: /* fldm[sdx]. */
5718 case 3:
5719 case 5:
5720 {
5721 unsigned int i, offset = insn & 0xff;
5722
5723 if (is_double)
5724 offset >>= 1;
5725
5726 for (i = fd; i < fd + offset; i++)
5727 bfd_arm_vfp11_write_mask (destmask, i);
5728 }
5729 break;
5730
5731 case 4: /* fld[sd]. */
5732 case 6:
5733 bfd_arm_vfp11_write_mask (destmask, fd);
5734 break;
5735
5736 default:
5737 return VFP11_BAD;
5738 }
5739
5740 pipe = VFP11_LS;
5741 }
5742 /* Single-register transfer. Note L==0. */
5743 else if ((insn & 0x0f100e10) == 0x0e000a10)
5744 {
5745 unsigned int opcode = (insn >> 21) & 7;
5746 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5747
5748 switch (opcode)
5749 {
5750 case 0: /* fmsr/fmdlr. */
5751 case 1: /* fmdhr. */
5752 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5753 destination register. I don't know if this is exactly right,
5754 but it is the conservative choice. */
5755 bfd_arm_vfp11_write_mask (destmask, fn);
5756 break;
5757
5758 case 7: /* fmxr. */
5759 break;
5760 }
5761
5762 pipe = VFP11_LS;
5763 }
5764
5765 return pipe;
5766 }
5767
5768
5769 static int elf32_arm_compare_mapping (const void * a, const void * b);
5770
5771
5772 /* Look for potentially-troublesome code sequences which might trigger the
5773 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5774 (available from ARM) for details of the erratum. A short version is
5775 described in ld.texinfo. */
5776
5777 bfd_boolean
5778 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5779 {
5780 asection *sec;
5781 bfd_byte *contents = NULL;
5782 int state = 0;
5783 int regs[3], numregs = 0;
5784 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5785 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5786
5787 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5788 The states transition as follows:
5789
5790 0 -> 1 (vector) or 0 -> 2 (scalar)
5791 A VFP FMAC-pipeline instruction has been seen. Fill
5792 regs[0]..regs[numregs-1] with its input operands. Remember this
5793 instruction in 'first_fmac'.
5794
5795 1 -> 2
5796 Any instruction, except for a VFP instruction which overwrites
5797 regs[*].
5798
5799 1 -> 3 [ -> 0 ] or
5800 2 -> 3 [ -> 0 ]
5801 A VFP instruction has been seen which overwrites any of regs[*].
5802 We must make a veneer! Reset state to 0 before examining next
5803 instruction.
5804
5805 2 -> 0
5806 If we fail to match anything in state 2, reset to state 0 and reset
5807 the instruction pointer to the instruction after 'first_fmac'.
5808
5809 If the VFP11 vector mode is in use, there must be at least two unrelated
5810 instructions between anti-dependent VFP11 instructions to properly avoid
5811 triggering the erratum, hence the use of the extra state 1. */
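/* Editor's note (hypothetical walk-through, not in the original source),
   for the scalar case (use_vector == 0): an FMACS whose operands s0, s1
   and s2 are recorded in regs[] moves the FSM from state 0 to state 2.
   If a later VFP instruction (say an FLDS into s1) writes one of those
   registers, the FSM enters state 3 and a veneer is recorded for the
   FMACS; if instead the next instruction is harmless, the FSM drops back
   to state 0 and rescans from the instruction following the FMACS.  */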
5812
5813 /* If we are only performing a partial link, do not bother
5814 to construct any veneers. */
5815 if (link_info->relocatable)
5816 return TRUE;
5817
5818 /* Skip if this bfd does not correspond to an ELF image. */
5819 if (! is_arm_elf (abfd))
5820 return TRUE;
5821
5822 /* We should have chosen a fix type by the time we get here. */
5823 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5824
5825 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5826 return TRUE;
5827
5828 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5829 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5830 return TRUE;
5831
5832 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5833 {
5834 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5835 struct _arm_elf_section_data *sec_data;
5836
5837 /* If we don't have executable progbits, we're not interested in this
5838 section. Also skip if section is to be excluded. */
5839 if (elf_section_type (sec) != SHT_PROGBITS
5840 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5841 || (sec->flags & SEC_EXCLUDE) != 0
5842 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5843 || sec->output_section == bfd_abs_section_ptr
5844 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5845 continue;
5846
5847 sec_data = elf32_arm_section_data (sec);
5848
5849 if (sec_data->mapcount == 0)
5850 continue;
5851
5852 if (elf_section_data (sec)->this_hdr.contents != NULL)
5853 contents = elf_section_data (sec)->this_hdr.contents;
5854 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5855 goto error_return;
5856
5857 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
5858 elf32_arm_compare_mapping);
5859
5860 for (span = 0; span < sec_data->mapcount; span++)
5861 {
5862 unsigned int span_start = sec_data->map[span].vma;
5863 unsigned int span_end = (span == sec_data->mapcount - 1)
5864 ? sec->size : sec_data->map[span + 1].vma;
5865 char span_type = sec_data->map[span].type;
5866
5867 /* FIXME: Only ARM mode is supported at present. We may need to
5868 support Thumb-2 mode also at some point. */
5869 if (span_type != 'a')
5870 continue;
5871
5872 for (i = span_start; i < span_end;)
5873 {
5874 unsigned int next_i = i + 4;
5875 unsigned int insn = bfd_big_endian (abfd)
5876 ? (contents[i] << 24)
5877 | (contents[i + 1] << 16)
5878 | (contents[i + 2] << 8)
5879 | contents[i + 3]
5880 : (contents[i + 3] << 24)
5881 | (contents[i + 2] << 16)
5882 | (contents[i + 1] << 8)
5883 | contents[i];
5884 unsigned int writemask = 0;
5885 enum bfd_arm_vfp11_pipe pipe;
5886
5887 switch (state)
5888 {
5889 case 0:
5890 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
5891 &numregs);
5892 /* I'm assuming the VFP11 erratum can trigger with denorm
5893 operands on either the FMAC or the DS pipeline. This might
5894 lead to slightly overenthusiastic veneer insertion. */
5895 if (pipe == VFP11_FMAC || pipe == VFP11_DS)
5896 {
5897 state = use_vector ? 1 : 2;
5898 first_fmac = i;
5899 veneer_of_insn = insn;
5900 }
5901 break;
5902
5903 case 1:
5904 {
5905 int other_regs[3], other_numregs;
5906 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5907 other_regs,
5908 &other_numregs);
5909 if (pipe != VFP11_BAD
5910 && bfd_arm_vfp11_antidependency (writemask, regs,
5911 numregs))
5912 state = 3;
5913 else
5914 state = 2;
5915 }
5916 break;
5917
5918 case 2:
5919 {
5920 int other_regs[3], other_numregs;
5921 pipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
5922 other_regs,
5923 &other_numregs);
5924 if (pipe != VFP11_BAD
5925 && bfd_arm_vfp11_antidependency (writemask, regs,
5926 numregs))
5927 state = 3;
5928 else
5929 {
5930 state = 0;
5931 next_i = first_fmac + 4;
5932 }
5933 }
5934 break;
5935
5936 case 3:
5937 abort (); /* Should be unreachable. */
5938 }
5939
5940 if (state == 3)
5941 {
5942 elf32_vfp11_erratum_list *newerr
5943 = bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5944 int errcount;
5945
5946 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
5947
5948 newerr->u.b.vfp_insn = veneer_of_insn;
5949
5950 switch (span_type)
5951 {
5952 case 'a':
5953 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
5954 break;
5955
5956 default:
5957 abort ();
5958 }
5959
5960 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
5961 first_fmac);
5962
5963 newerr->vma = -1;
5964
5965 newerr->next = sec_data->erratumlist;
5966 sec_data->erratumlist = newerr;
5967
5968 state = 0;
5969 }
5970
5971 i = next_i;
5972 }
5973 }
5974
5975 if (contents != NULL
5976 && elf_section_data (sec)->this_hdr.contents != contents)
5977 free (contents);
5978 contents = NULL;
5979 }
5980
5981 return TRUE;
5982
5983 error_return:
5984 if (contents != NULL
5985 && elf_section_data (sec)->this_hdr.contents != contents)
5986 free (contents);
5987
5988 return FALSE;
5989 }
5990
5991 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
5992 after sections have been laid out, using specially-named symbols. */
5993
5994 void
5995 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
5996 struct bfd_link_info *link_info)
5997 {
5998 asection *sec;
5999 struct elf32_arm_link_hash_table *globals;
6000 char *tmp_name;
6001
6002 if (link_info->relocatable)
6003 return;
6004
6005 /* Skip if this bfd does not correspond to an ELF image. */
6006 if (! is_arm_elf (abfd))
6007 return;
6008
6009 globals = elf32_arm_hash_table (link_info);
6010
6011 tmp_name = bfd_malloc ((bfd_size_type) strlen
6012 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6013
6014 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6015 {
6016 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6017 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6018
6019 for (; errnode != NULL; errnode = errnode->next)
6020 {
6021 struct elf_link_hash_entry *myh;
6022 bfd_vma vma;
6023
6024 switch (errnode->type)
6025 {
6026 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6027 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6028 /* Find veneer symbol. */
6029 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6030 errnode->u.b.veneer->u.v.id);
6031
6032 myh = elf_link_hash_lookup
6033 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6034
6035 if (myh == NULL)
6036 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6037 "`%s'"), abfd, tmp_name);
6038
6039 vma = myh->root.u.def.section->output_section->vma
6040 + myh->root.u.def.section->output_offset
6041 + myh->root.u.def.value;
6042
6043 errnode->u.b.veneer->vma = vma;
6044 break;
6045
6046 case VFP11_ERRATUM_ARM_VENEER:
6047 case VFP11_ERRATUM_THUMB_VENEER:
6048 /* Find return location. */
6049 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6050 errnode->u.v.id);
6051
6052 myh = elf_link_hash_lookup
6053 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6054
6055 if (myh == NULL)
6056 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6057 "`%s'"), abfd, tmp_name);
6058
6059 vma = myh->root.u.def.section->output_section->vma
6060 + myh->root.u.def.section->output_offset
6061 + myh->root.u.def.value;
6062
6063 errnode->u.v.branch->vma = vma;
6064 break;
6065
6066 default:
6067 abort ();
6068 }
6069 }
6070 }
6071
6072 free (tmp_name);
6073 }
6074
6075
6076 /* Set target relocation values needed during linking. */
6077
6078 void
6079 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6080 struct bfd_link_info *link_info,
6081 int target1_is_rel,
6082 char * target2_type,
6083 int fix_v4bx,
6084 int use_blx,
6085 bfd_arm_vfp11_fix vfp11_fix,
6086 int no_enum_warn, int no_wchar_warn,
6087 int pic_veneer, int fix_cortex_a8)
6088 {
6089 struct elf32_arm_link_hash_table *globals;
6090
6091 globals = elf32_arm_hash_table (link_info);
6092
6093 globals->target1_is_rel = target1_is_rel;
6094 if (strcmp (target2_type, "rel") == 0)
6095 globals->target2_reloc = R_ARM_REL32;
6096 else if (strcmp (target2_type, "abs") == 0)
6097 globals->target2_reloc = R_ARM_ABS32;
6098 else if (strcmp (target2_type, "got-rel") == 0)
6099 globals->target2_reloc = R_ARM_GOT_PREL;
6100 else
6101 {
6102 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6103 target2_type);
6104 }
6105 globals->fix_v4bx = fix_v4bx;
6106 globals->use_blx |= use_blx;
6107 globals->vfp11_fix = vfp11_fix;
6108 globals->pic_veneer = pic_veneer;
6109 globals->fix_cortex_a8 = fix_cortex_a8;
6110
6111 BFD_ASSERT (is_arm_elf (output_bfd));
6112 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6113 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6114 }
6115
6116 /* Replace the target offset of a Thumb bl or b.w instruction. */
6117
6118 static void
6119 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6120 {
6121 bfd_vma upper;
6122 bfd_vma lower;
6123 int reloc_sign;
6124
6125 BFD_ASSERT ((offset & 1) == 0);
6126
6127 upper = bfd_get_16 (abfd, insn);
6128 lower = bfd_get_16 (abfd, insn + 2);
6129 reloc_sign = (offset < 0) ? 1 : 0;
6130 upper = (upper & ~(bfd_vma) 0x7ff)
6131 | ((offset >> 12) & 0x3ff)
6132 | (reloc_sign << 10);
6133 lower = (lower & ~(bfd_vma) 0x2fff)
6134 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6135 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6136 | ((offset >> 1) & 0x7ff);
6137 bfd_put_16 (abfd, upper, insn);
6138 bfd_put_16 (abfd, lower, insn + 2);
6139 }
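/* Editor's note (added comment, not in the original source): the fields
   written above follow the Thumb-2 BL/BLX offset encoding, in which the
   CPU reassembles the branch offset as the sign-extended value
   S:I1:I2:imm10:imm11:'0', with I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S).
   Here S occupies bit 10 of the upper halfword and imm10 its low ten
   bits, while J1, J2 and imm11 occupy bits 13, 11 and 10:0 of the lower
   halfword; hence the masks ~0x7ff and ~0x2fff used to clear the old
   offset while preserving the BL/BLX distinction in bit 12.  */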
6140
6141 /* Thumb code calling an ARM function. */
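/* Editor's note (added comment, not in the original source): judging by
   the instruction names used below (t2a1_bx_pc_insn, t2a2_noop_insn,
   t2a3_b_insn), the glue emitted here appears to be the classic
   three-instruction Thumb-to-ARM veneer: a Thumb "bx pc" to switch to
   ARM state, a Thumb nop to keep the following word aligned, and an ARM
   "b" whose 24-bit offset is computed into ret_offset.  */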
6142
6143 static int
6144 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6145 const char * name,
6146 bfd * input_bfd,
6147 bfd * output_bfd,
6148 asection * input_section,
6149 bfd_byte * hit_data,
6150 asection * sym_sec,
6151 bfd_vma offset,
6152 bfd_signed_vma addend,
6153 bfd_vma val,
6154 char **error_message)
6155 {
6156 asection * s = 0;
6157 bfd_vma my_offset;
6158 long int ret_offset;
6159 struct elf_link_hash_entry * myh;
6160 struct elf32_arm_link_hash_table * globals;
6161
6162 myh = find_thumb_glue (info, name, error_message);
6163 if (myh == NULL)
6164 return FALSE;
6165
6166 globals = elf32_arm_hash_table (info);
6167
6168 BFD_ASSERT (globals != NULL);
6169 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6170
6171 my_offset = myh->root.u.def.value;
6172
6173 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6174 THUMB2ARM_GLUE_SECTION_NAME);
6175
6176 BFD_ASSERT (s != NULL);
6177 BFD_ASSERT (s->contents != NULL);
6178 BFD_ASSERT (s->output_section != NULL);
6179
6180 if ((my_offset & 0x01) == 0x01)
6181 {
6182 if (sym_sec != NULL
6183 && sym_sec->owner != NULL
6184 && !INTERWORK_FLAG (sym_sec->owner))
6185 {
6186 (*_bfd_error_handler)
6187 (_("%B(%s): warning: interworking not enabled.\n"
6188 " first occurrence: %B: thumb call to arm"),
6189 sym_sec->owner, input_bfd, name);
6190
6191 return FALSE;
6192 }
6193
6194 --my_offset;
6195 myh->root.u.def.value = my_offset;
6196
6197 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6198 s->contents + my_offset);
6199
6200 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6201 s->contents + my_offset + 2);
6202
6203 ret_offset =
6204 /* Address of destination of the stub. */
6205 ((bfd_signed_vma) val)
6206 - ((bfd_signed_vma)
6207 /* Offset from the start of the current section
6208 to the start of the stubs. */
6209 (s->output_offset
6210 /* Offset of the start of this stub from the start of the stubs. */
6211 + my_offset
6212 /* Address of the start of the current section. */
6213 + s->output_section->vma)
6214 /* The branch instruction is 4 bytes into the stub. */
6215 + 4
6216 /* ARM branches work from the pc of the instruction + 8. */
6217 + 8);
6218
6219 put_arm_insn (globals, output_bfd,
6220 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6221 s->contents + my_offset + 4);
6222 }
6223
6224 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6225
6226 /* Now go back and fix up the original BL insn to point to here. */
6227 ret_offset =
6228 /* Address of where the stub is located. */
6229 (s->output_section->vma + s->output_offset + my_offset)
6230 /* Address of where the BL is located. */
6231 - (input_section->output_section->vma + input_section->output_offset
6232 + offset)
6233 /* Addend in the relocation. */
6234 - addend
6235 /* Biassing for PC-relative addressing. */
6236 - 8;
6237
6238 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6239
6240 return TRUE;
6241 }
6242
6243 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6244
6245 static struct elf_link_hash_entry *
6246 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6247 const char * name,
6248 bfd * input_bfd,
6249 bfd * output_bfd,
6250 asection * sym_sec,
6251 bfd_vma val,
6252 asection * s,
6253 char ** error_message)
6254 {
6255 bfd_vma my_offset;
6256 long int ret_offset;
6257 struct elf_link_hash_entry * myh;
6258 struct elf32_arm_link_hash_table * globals;
6259
6260 myh = find_arm_glue (info, name, error_message);
6261 if (myh == NULL)
6262 return NULL;
6263
6264 globals = elf32_arm_hash_table (info);
6265
6266 BFD_ASSERT (globals != NULL);
6267 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6268
6269 my_offset = myh->root.u.def.value;
6270
6271 if ((my_offset & 0x01) == 0x01)
6272 {
6273 if (sym_sec != NULL
6274 && sym_sec->owner != NULL
6275 && !INTERWORK_FLAG (sym_sec->owner))
6276 {
6277 (*_bfd_error_handler)
6278 (_("%B(%s): warning: interworking not enabled.\n"
6279 " first occurrence: %B: arm call to thumb"),
6280 sym_sec->owner, input_bfd, name);
6281 }
6282
6283 --my_offset;
6284 myh->root.u.def.value = my_offset;
6285
6286 if (info->shared || globals->root.is_relocatable_executable
6287 || globals->pic_veneer)
6288 {
6289 /* For relocatable objects we can't use absolute addresses,
6290 so construct the address from a relative offset. */
6291 /* TODO: If the offset is small it's probably worth
6292 constructing the address with adds. */
6293 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6294 s->contents + my_offset);
6295 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6296 s->contents + my_offset + 4);
6297 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6298 s->contents + my_offset + 8);
6299 /* Adjust the offset by 4 for the position of the add,
6300 and 8 for the pipeline offset. */
6301 ret_offset = (val - (s->output_offset
6302 + s->output_section->vma
6303 + my_offset + 12))
6304 | 1;
6305 bfd_put_32 (output_bfd, ret_offset,
6306 s->contents + my_offset + 12);
6307 }
6308 else if (globals->use_blx)
6309 {
6310 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6311 s->contents + my_offset);
6312
6313 /* It's a thumb address. Add the low order bit. */
6314 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6315 s->contents + my_offset + 4);
6316 }
6317 else
6318 {
6319 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6320 s->contents + my_offset);
6321
6322 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6323 s->contents + my_offset + 4);
6324
6325 /* It's a thumb address. Add the low order bit. */
6326 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6327 s->contents + my_offset + 8);
6328
6329 my_offset += 12;
6330 }
6331 }
6332
6333 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6334
6335 return myh;
6336 }
6337
6338 /* Arm code calling a Thumb function. */
6339
6340 static int
6341 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6342 const char * name,
6343 bfd * input_bfd,
6344 bfd * output_bfd,
6345 asection * input_section,
6346 bfd_byte * hit_data,
6347 asection * sym_sec,
6348 bfd_vma offset,
6349 bfd_signed_vma addend,
6350 bfd_vma val,
6351 char **error_message)
6352 {
6353 unsigned long int tmp;
6354 bfd_vma my_offset;
6355 asection * s;
6356 long int ret_offset;
6357 struct elf_link_hash_entry * myh;
6358 struct elf32_arm_link_hash_table * globals;
6359
6360 globals = elf32_arm_hash_table (info);
6361
6362 BFD_ASSERT (globals != NULL);
6363 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6364
6365 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6366 ARM2THUMB_GLUE_SECTION_NAME);
6367 BFD_ASSERT (s != NULL);
6368 BFD_ASSERT (s->contents != NULL);
6369 BFD_ASSERT (s->output_section != NULL);
6370
6371 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6372 sym_sec, val, s, error_message);
6373 if (!myh)
6374 return FALSE;
6375
6376 my_offset = myh->root.u.def.value;
6377 tmp = bfd_get_32 (input_bfd, hit_data);
6378 tmp = tmp & 0xFF000000;
6379
6380 /* Somehow these are both 4 too far, so subtract 8. */
6381 ret_offset = (s->output_offset
6382 + my_offset
6383 + s->output_section->vma
6384 - (input_section->output_offset
6385 + input_section->output_section->vma
6386 + offset + addend)
6387 - 8);
6388
6389 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
6390
6391 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6392
6393 return TRUE;
6394 }
6395
6396 /* Populate Arm stub for an exported Thumb function. */
6397
6398 static bfd_boolean
6399 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6400 {
6401 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6402 asection * s;
6403 struct elf_link_hash_entry * myh;
6404 struct elf32_arm_link_hash_entry *eh;
6405 struct elf32_arm_link_hash_table * globals;
6406 asection *sec;
6407 bfd_vma val;
6408 char *error_message;
6409
6410 eh = elf32_arm_hash_entry (h);
6411 /* Allocate stubs for exported Thumb functions on v4t. */
6412 if (eh->export_glue == NULL)
6413 return TRUE;
6414
6415 globals = elf32_arm_hash_table (info);
6416
6417 BFD_ASSERT (globals != NULL);
6418 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6419
6420 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6421 ARM2THUMB_GLUE_SECTION_NAME);
6422 BFD_ASSERT (s != NULL);
6423 BFD_ASSERT (s->contents != NULL);
6424 BFD_ASSERT (s->output_section != NULL);
6425
6426 sec = eh->export_glue->root.u.def.section;
6427
6428 BFD_ASSERT (sec->output_section != NULL);
6429
6430 val = eh->export_glue->root.u.def.value + sec->output_offset
6431 + sec->output_section->vma;
6432
6433 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6434 h->root.u.def.section->owner,
6435 globals->obfd, sec, val, s,
6436 &error_message);
6437 BFD_ASSERT (myh);
6438 return TRUE;
6439 }
6440
6441 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6442
6443 static bfd_vma
6444 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6445 {
6446 bfd_byte *p;
6447 bfd_vma glue_addr;
6448 asection *s;
6449 struct elf32_arm_link_hash_table *globals;
6450
6451 globals = elf32_arm_hash_table (info);
6452
6453 BFD_ASSERT (globals != NULL);
6454 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6455
6456 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6457 ARM_BX_GLUE_SECTION_NAME);
6458 BFD_ASSERT (s != NULL);
6459 BFD_ASSERT (s->contents != NULL);
6460 BFD_ASSERT (s->output_section != NULL);
6461
6462 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6463
6464 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6465
6466 if ((globals->bx_glue_offset[reg] & 1) == 0)
6467 {
6468 p = s->contents + glue_addr;
6469 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6470 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6471 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6472 globals->bx_glue_offset[reg] |= 1;
6473 }
6474
6475 return glue_addr + s->output_section->vma + s->output_offset;
6476 }
6477
6478 /* Generate Arm stubs for exported Thumb symbols. */
6479 static void
6480 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6481 struct bfd_link_info *link_info)
6482 {
6483 struct elf32_arm_link_hash_table * globals;
6484
6485 if (link_info == NULL)
6486 /* Ignore this if we are not called by the ELF backend linker. */
6487 return;
6488
6489 globals = elf32_arm_hash_table (link_info);
6490 /* If blx is available then exported Thumb symbols are OK and there is
6491 nothing to do. */
6492 if (globals->use_blx)
6493 return;
6494
6495 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6496 link_info);
6497 }
6498
6499 /* Some relocations map to different relocations depending on the
6500 target. Return the real relocation. */
6501
6502 static int
6503 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6504 int r_type)
6505 {
6506 switch (r_type)
6507 {
6508 case R_ARM_TARGET1:
6509 if (globals->target1_is_rel)
6510 return R_ARM_REL32;
6511 else
6512 return R_ARM_ABS32;
6513
6514 case R_ARM_TARGET2:
6515 return globals->target2_reloc;
6516
6517 default:
6518 return r_type;
6519 }
6520 }
6521
6522 /* Return the base VMA address which should be subtracted from real addresses
6523 when resolving @dtpoff relocation.
6524 This is PT_TLS segment p_vaddr. */
6525
6526 static bfd_vma
6527 dtpoff_base (struct bfd_link_info *info)
6528 {
6529 /* If tls_sec is NULL, we should have signalled an error already. */
6530 if (elf_hash_table (info)->tls_sec == NULL)
6531 return 0;
6532 return elf_hash_table (info)->tls_sec->vma;
6533 }
6534
6535 /* Return the relocation value for a @tpoff relocation
6536 if the STT_TLS virtual address is ADDRESS. */
6537
6538 static bfd_vma
6539 tpoff (struct bfd_link_info *info, bfd_vma address)
6540 {
6541 struct elf_link_hash_table *htab = elf_hash_table (info);
6542 bfd_vma base;
6543
6544 /* If tls_sec is NULL, we should have signalled an error already. */
6545 if (htab->tls_sec == NULL)
6546 return 0;
6547 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6548 return address - htab->tls_sec->vma + base;
6549 }
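/* Editor's note (hypothetical worked example, not in the original source),
   assuming the 8-byte TCB used by the ARM TLS ABI: with a PT_TLS segment
   at vma 0x11000 aligned to 2**2, a thread-local object at address
   0x11010 gets tpoff == 0x11010 - 0x11000 + align_power (8, 2) == 0x18,
   whereas its @dtpoff value relative to dtpoff_base () would be 0x10.  */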
6550
6551 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6552 VALUE is the relocation value. */
6553
6554 static bfd_reloc_status_type
6555 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6556 {
6557 if (value > 0xfff)
6558 return bfd_reloc_overflow;
6559
6560 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6561 bfd_put_32 (abfd, value, data);
6562 return bfd_reloc_ok;
6563 }
6564
6565 /* For a given value of n, calculate the value of G_n as required to
6566 deal with group relocations. We return it in the form of an
6567 encoded constant-and-rotation, together with the final residual. If n is
6568 specified as less than zero, then final_residual is filled with the
6569 input value and no further action is performed. */
6570
6571 static bfd_vma
6572 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6573 {
6574 int current_n;
6575 bfd_vma g_n;
6576 bfd_vma encoded_g_n = 0;
6577 bfd_vma residual = value; /* Also known as Y_n. */
6578
6579 for (current_n = 0; current_n <= n; current_n++)
6580 {
6581 int shift;
6582
6583 /* Calculate which part of the value to mask. */
6584 if (residual == 0)
6585 shift = 0;
6586 else
6587 {
6588 int msb;
6589
6590 /* Determine the most significant bit in the residual and
6591 align the resulting value to a 2-bit boundary. */
6592 for (msb = 30; msb >= 0; msb -= 2)
6593 if (residual & (3 << msb))
6594 break;
6595
6596 /* The desired shift is now (msb - 6), or zero, whichever
6597 is the greater. */
6598 shift = msb - 6;
6599 if (shift < 0)
6600 shift = 0;
6601 }
6602
6603 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6604 g_n = residual & (0xff << shift);
6605 encoded_g_n = (g_n >> shift)
6606 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6607
6608 /* Calculate the residual for the next time around. */
6609 residual &= ~g_n;
6610 }
6611
6612 *final_residual = residual;
6613
6614 return encoded_g_n;
6615 }
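/* Editor's note (hypothetical worked example, not in the original source):
   for value == 0x1234 and n == 0, the topmost set bit pair is at bit 12,
   giving shift == 6 and G_0 == 0x1234 & (0xff << 6) == 0x1200.  The
   residual returned is 0x34 and the encoded form is 0x48 | (13 << 8)
   == 0xd48: the 8-bit constant 0x48 with rotation field 13, since 0x48
   rotated right by 2 * 13 == 26 bits reproduces 0x1200.  */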
6616
6617 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6618 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6619
6620 static int
6621 identify_add_or_sub (bfd_vma insn)
6622 {
6623 int opcode = insn & 0x1e00000;
6624
6625 if (opcode == 1 << 23) /* ADD */
6626 return 1;
6627
6628 if (opcode == 1 << 22) /* SUB */
6629 return -1;
6630
6631 return 0;
6632 }
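/* Editor's note (added comment, not in the original source): the mask
   0x1e00000 isolates the data-processing opcode in bits 24:21 of an ARM
   instruction; within that field ADD is 0b0100 (0x4 << 21 == 1 << 23) and
   SUB is 0b0010 (0x2 << 21 == 1 << 22), which is what the two comparisons
   above test.  */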
6633
6634 /* Perform a relocation as part of a final link. */
6635
6636 static bfd_reloc_status_type
6637 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6638 bfd * input_bfd,
6639 bfd * output_bfd,
6640 asection * input_section,
6641 bfd_byte * contents,
6642 Elf_Internal_Rela * rel,
6643 bfd_vma value,
6644 struct bfd_link_info * info,
6645 asection * sym_sec,
6646 const char * sym_name,
6647 int sym_flags,
6648 struct elf_link_hash_entry * h,
6649 bfd_boolean * unresolved_reloc_p,
6650 char ** error_message)
6651 {
6652 unsigned long r_type = howto->type;
6653 unsigned long r_symndx;
6654 bfd_byte * hit_data = contents + rel->r_offset;
6655 bfd * dynobj = NULL;
6656 Elf_Internal_Shdr * symtab_hdr;
6657 struct elf_link_hash_entry ** sym_hashes;
6658 bfd_vma * local_got_offsets;
6659 asection * sgot = NULL;
6660 asection * splt = NULL;
6661 asection * sreloc = NULL;
6662 bfd_vma addend;
6663 bfd_signed_vma signed_addend;
6664 struct elf32_arm_link_hash_table * globals;
6665
6666 globals = elf32_arm_hash_table (info);
6667
6668 BFD_ASSERT (is_arm_elf (input_bfd));
6669
6670 /* Some relocation types map to different relocations depending on the
6671 target. We pick the right one here. */
6672 r_type = arm_real_reloc_type (globals, r_type);
6673 if (r_type != howto->type)
6674 howto = elf32_arm_howto_from_type (r_type);
6675
6676 /* If the start address has been set, then set the EF_ARM_HASENTRY
6677 flag. Setting this more than once is redundant, but the cost is
6678 not too high, and it keeps the code simple.
6679
6680 The test is done here, rather than somewhere else, because the
6681 start address is only set just before the final link commences.
6682
6683 Note - if the user deliberately sets a start address of 0, the
6684 flag will not be set. */
6685 if (bfd_get_start_address (output_bfd) != 0)
6686 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6687
6688 dynobj = elf_hash_table (info)->dynobj;
6689 if (dynobj)
6690 {
6691 sgot = bfd_get_section_by_name (dynobj, ".got");
6692 splt = bfd_get_section_by_name (dynobj, ".plt");
6693 }
6694 symtab_hdr = & elf_symtab_hdr (input_bfd);
6695 sym_hashes = elf_sym_hashes (input_bfd);
6696 local_got_offsets = elf_local_got_offsets (input_bfd);
6697 r_symndx = ELF32_R_SYM (rel->r_info);
6698
6699 if (globals->use_rel)
6700 {
6701 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6702
6703 if (addend & ((howto->src_mask + 1) >> 1))
6704 {
6705 signed_addend = -1;
6706 signed_addend &= ~ howto->src_mask;
6707 signed_addend |= addend;
6708 }
6709 else
6710 signed_addend = addend;
6711 }
6712 else
6713 addend = signed_addend = rel->r_addend;
6714
6715 switch (r_type)
6716 {
6717 case R_ARM_NONE:
6718 /* We don't need to find a value for this symbol. It's just a
6719 marker. */
6720 *unresolved_reloc_p = FALSE;
6721 return bfd_reloc_ok;
6722
6723 case R_ARM_ABS12:
6724 if (!globals->vxworks_p)
6725 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6726
6727 case R_ARM_PC24:
6728 case R_ARM_ABS32:
6729 case R_ARM_ABS32_NOI:
6730 case R_ARM_REL32:
6731 case R_ARM_REL32_NOI:
6732 case R_ARM_CALL:
6733 case R_ARM_JUMP24:
6734 case R_ARM_XPC25:
6735 case R_ARM_PREL31:
6736 case R_ARM_PLT32:
6737 /* Handle relocations which should use the PLT entry. ABS32/REL32
6738 will use the symbol's value, which may point to a PLT entry, but we
6739 don't need to handle that here. If we created a PLT entry, all
6740 branches in this object should go to it, except if the PLT is too
6741 far away, in which case a long branch stub should be inserted. */
6742 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6743 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6744 && r_type != R_ARM_CALL
6745 && r_type != R_ARM_JUMP24
6746 && r_type != R_ARM_PLT32)
6747 && h != NULL
6748 && splt != NULL
6749 && h->plt.offset != (bfd_vma) -1)
6750 {
6751 /* If we've created a .plt section, and assigned a PLT entry to
6752 this function, it should not be known to bind locally. If
6753 it were, we would have cleared the PLT entry. */
6754 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6755
6756 value = (splt->output_section->vma
6757 + splt->output_offset
6758 + h->plt.offset);
6759 *unresolved_reloc_p = FALSE;
6760 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6761 contents, rel->r_offset, value,
6762 rel->r_addend);
6763 }
6764
6765 /* When generating a shared object or relocatable executable, these
6766 relocations are copied into the output file to be resolved at
6767 run time. */
6768 if ((info->shared || globals->root.is_relocatable_executable)
6769 && (input_section->flags & SEC_ALLOC)
6770 && !(elf32_arm_hash_table (info)->vxworks_p
6771 && strcmp (input_section->output_section->name,
6772 ".tls_vars") == 0)
6773 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6774 || !SYMBOL_CALLS_LOCAL (info, h))
6775 && (h == NULL
6776 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6777 || h->root.type != bfd_link_hash_undefweak)
6778 && r_type != R_ARM_PC24
6779 && r_type != R_ARM_CALL
6780 && r_type != R_ARM_JUMP24
6781 && r_type != R_ARM_PREL31
6782 && r_type != R_ARM_PLT32)
6783 {
6784 Elf_Internal_Rela outrel;
6785 bfd_byte *loc;
6786 bfd_boolean skip, relocate;
6787
6788 *unresolved_reloc_p = FALSE;
6789
6790 if (sreloc == NULL)
6791 {
6792 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6793 ! globals->use_rel);
6794
6795 if (sreloc == NULL)
6796 return bfd_reloc_notsupported;
6797 }
6798
6799 skip = FALSE;
6800 relocate = FALSE;
6801
6802 outrel.r_addend = addend;
6803 outrel.r_offset =
6804 _bfd_elf_section_offset (output_bfd, info, input_section,
6805 rel->r_offset);
6806 if (outrel.r_offset == (bfd_vma) -1)
6807 skip = TRUE;
6808 else if (outrel.r_offset == (bfd_vma) -2)
6809 skip = TRUE, relocate = TRUE;
6810 outrel.r_offset += (input_section->output_section->vma
6811 + input_section->output_offset);
6812
6813 if (skip)
6814 memset (&outrel, 0, sizeof outrel);
6815 else if (h != NULL
6816 && h->dynindx != -1
6817 && (!info->shared
6818 || !info->symbolic
6819 || !h->def_regular))
6820 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6821 else
6822 {
6823 int symbol;
6824
6825 /* This symbol is local, or marked to become local. */
6826 if (sym_flags == STT_ARM_TFUNC)
6827 value |= 1;
6828 if (globals->symbian_p)
6829 {
6830 asection *osec;
6831
6832 /* On Symbian OS, the data segment and text segment
6833 can be relocated independently. Therefore, we
6834 must indicate the segment to which this
6835 relocation is relative. The BPABI allows us to
6836 use any symbol in the right segment; we just use
6837 the section symbol as it is convenient. (We
6838 cannot use the symbol given by "h" directly as it
6839 will not appear in the dynamic symbol table.)
6840
6841 Note that the dynamic linker ignores the section
6842 symbol value, so we don't subtract osec->vma
6843 from the emitted reloc addend. */
6844 if (sym_sec)
6845 osec = sym_sec->output_section;
6846 else
6847 osec = input_section->output_section;
6848 symbol = elf_section_data (osec)->dynindx;
6849 if (symbol == 0)
6850 {
6851 struct elf_link_hash_table *htab = elf_hash_table (info);
6852
6853 if ((osec->flags & SEC_READONLY) == 0
6854 && htab->data_index_section != NULL)
6855 osec = htab->data_index_section;
6856 else
6857 osec = htab->text_index_section;
6858 symbol = elf_section_data (osec)->dynindx;
6859 }
6860 BFD_ASSERT (symbol != 0);
6861 }
6862 else
6863 /* On SVR4-ish systems, the dynamic loader cannot
6864 relocate the text and data segments independently,
6865 so the symbol does not matter. */
6866 symbol = 0;
6867 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
6868 if (globals->use_rel)
6869 relocate = TRUE;
6870 else
6871 outrel.r_addend += value;
6872 }
6873
6874 loc = sreloc->contents;
6875 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
6876 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
6877
6878 /* If this reloc is against an external symbol, we do not want to
6879 fiddle with the addend. Otherwise, we need to include the symbol
6880 value so that it becomes an addend for the dynamic reloc. */
6881 if (! relocate)
6882 return bfd_reloc_ok;
6883
6884 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6885 contents, rel->r_offset, value,
6886 (bfd_vma) 0);
6887 }
6888 else switch (r_type)
6889 {
6890 case R_ARM_ABS12:
6891 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6892
6893 case R_ARM_XPC25: /* Arm BLX instruction. */
6894 case R_ARM_CALL:
6895 case R_ARM_JUMP24:
6896 case R_ARM_PC24: /* Arm B/BL instruction. */
6897 case R_ARM_PLT32:
6898 {
6899 bfd_vma from;
6900 bfd_signed_vma branch_offset;
6901 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
6902
6903 if (r_type == R_ARM_XPC25)
6904 {
6905 /* Check for Arm calling Arm function. */
6906 /* FIXME: Should we translate the instruction into a BL
6907 instruction instead ? */
6908 if (sym_flags != STT_ARM_TFUNC)
6909 (*_bfd_error_handler)
6910 (_("%B: Warning: Arm BLX instruction targets Arm function '%s'."),
6911 input_bfd,
6912 h ? h->root.root.string : "(local)");
6913 }
6914 else if (r_type == R_ARM_PC24)
6915 {
6916 /* Check for Arm calling Thumb function. */
6917 if (sym_flags == STT_ARM_TFUNC)
6918 {
6919 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
6920 output_bfd, input_section,
6921 hit_data, sym_sec, rel->r_offset,
6922 signed_addend, value,
6923 error_message))
6924 return bfd_reloc_ok;
6925 else
6926 return bfd_reloc_dangerous;
6927 }
6928 }
6929
6930 /* Check if a stub has to be inserted because the
6931 destination is too far or we are changing mode. */
6932 if ( r_type == R_ARM_CALL
6933 || r_type == R_ARM_JUMP24
6934 || r_type == R_ARM_PLT32)
6935 {
6936 /* If the call goes through a PLT entry, make sure to
6937 check distance to the right destination address. */
6938 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
6939 {
6940 value = (splt->output_section->vma
6941 + splt->output_offset
6942 + h->plt.offset);
6943 *unresolved_reloc_p = FALSE;
6944 }
6945
6946 from = (input_section->output_section->vma
6947 + input_section->output_offset
6948 + rel->r_offset);
6949 branch_offset = (bfd_signed_vma)(value - from);
6950
6951 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
6952 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
6953 || ((sym_flags == STT_ARM_TFUNC)
6954 && (((r_type == R_ARM_CALL) && !globals->use_blx)
6955 || (r_type == R_ARM_JUMP24)
6956 || (r_type == R_ARM_PLT32) ))
6957 )
6958 {
6959 /* The target is out of reach, so redirect the
6960 branch to the local stub for this function. */
6961
6962 stub_entry = elf32_arm_get_stub_entry (input_section,
6963 sym_sec, h,
6964 rel, globals);
6965 if (stub_entry != NULL)
6966 value = (stub_entry->stub_offset
6967 + stub_entry->stub_sec->output_offset
6968 + stub_entry->stub_sec->output_section->vma);
6969 }
6970 }
6971
6972 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
6973 where:
6974 S is the address of the symbol in the relocation.
6975 P is address of the instruction being relocated.
6976 A is the addend (extracted from the instruction) in bytes.
6977
6978 S is held in 'value'.
6979 P is the base address of the section containing the
6980 instruction plus the offset of the reloc into that
6981 section, ie:
6982 (input_section->output_section->vma +
6983 input_section->output_offset +
6984 rel->r_offset).
6985 A is the addend, converted into bytes, ie:
6986 (signed_addend * 4)
6987
6988 Note: None of these operations have knowledge of the pipeline
6989 size of the processor, thus it is up to the assembler to
6990 encode this information into the addend. */
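/* Editor's note (hypothetical worked example, not in the original
   source): for a REL-format R_ARM_PC24 at offset 0x10 of a section
   placed at 0x8000, targeting a symbol at 0x8100, the extracted addend
   is -2 words (the usual -8 byte pipeline bias).  The statements below
   then yield value == 0x8100 - 0x8000 - 0x10 + (-2 * 4) == 0xe8, the
   rightshift further down leaves 0x3a in the 24-bit branch field, and
   the CPU computes 0x8010 + 8 + (0x3a << 2) == 0x8100, the intended
   target.  */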
6991 value -= (input_section->output_section->vma
6992 + input_section->output_offset);
6993 value -= rel->r_offset;
6994 if (globals->use_rel)
6995 value += (signed_addend << howto->size);
6996 else
6997 /* RELA addends do not have to be adjusted by howto->size. */
6998 value += signed_addend;
6999
7000 signed_addend = value;
7001 signed_addend >>= howto->rightshift;
7002
7003 /* A branch to an undefined weak symbol is turned into a jump to
7004 the next instruction unless a PLT entry will be created. */
7005 if (h && h->root.type == bfd_link_hash_undefweak
7006 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7007 {
7008 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000)
7009 | 0x0affffff;
7010 }
7011 else
7012 {
7013 /* Perform a signed range check. */
7014 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7015 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7016 return bfd_reloc_overflow;
7017
7018 addend = (value & 2);
7019
7020 value = (signed_addend & howto->dst_mask)
7021 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7022
7023 if (r_type == R_ARM_CALL)
7024 {
7025 /* Set the H bit in the BLX instruction. */
7026 if (sym_flags == STT_ARM_TFUNC)
7027 {
7028 if (addend)
7029 value |= (1 << 24);
7030 else
7031 value &= ~(bfd_vma)(1 << 24);
7032 }
7033
7034 /* Select the correct instruction (BL or BLX). */
7035 /* Only if we are not handling a BL to a stub. In this
7036 case, mode switching is performed by the stub. */
7037 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7038 value |= (1 << 28);
7039 else
7040 {
7041 value &= ~(bfd_vma)(1 << 28);
7042 value |= (1 << 24);
7043 }
7044 }
7045 }
7046 }
7047 break;
7048
7049 case R_ARM_ABS32:
7050 value += addend;
7051 if (sym_flags == STT_ARM_TFUNC)
7052 value |= 1;
7053 break;
7054
7055 case R_ARM_ABS32_NOI:
7056 value += addend;
7057 break;
7058
7059 case R_ARM_REL32:
7060 value += addend;
7061 if (sym_flags == STT_ARM_TFUNC)
7062 value |= 1;
7063 value -= (input_section->output_section->vma
7064 + input_section->output_offset + rel->r_offset);
7065 break;
7066
7067 case R_ARM_REL32_NOI:
7068 value += addend;
7069 value -= (input_section->output_section->vma
7070 + input_section->output_offset + rel->r_offset);
7071 break;
7072
7073 case R_ARM_PREL31:
7074 value -= (input_section->output_section->vma
7075 + input_section->output_offset + rel->r_offset);
7076 value += signed_addend;
7077 if (! h || h->root.type != bfd_link_hash_undefweak)
7078 {
7079 /* Check for overflow. */
7080 if ((value ^ (value >> 1)) & (1 << 30))
7081 return bfd_reloc_overflow;
7082 }
7083 value &= 0x7fffffff;
7084 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7085 if (sym_flags == STT_ARM_TFUNC)
7086 value |= 1;
7087 break;
7088 }
7089
7090 bfd_put_32 (input_bfd, value, hit_data);
7091 return bfd_reloc_ok;
7092
7093 case R_ARM_ABS8:
7094 value += addend;
7095 if ((long) value > 0x7f || (long) value < -0x80)
7096 return bfd_reloc_overflow;
7097
7098 bfd_put_8 (input_bfd, value, hit_data);
7099 return bfd_reloc_ok;
7100
7101 case R_ARM_ABS16:
7102 value += addend;
7103
7104 if ((long) value > 0x7fff || (long) value < -0x8000)
7105 return bfd_reloc_overflow;
7106
7107 bfd_put_16 (input_bfd, value, hit_data);
7108 return bfd_reloc_ok;
7109
7110 case R_ARM_THM_ABS5:
7111 /* Support ldr and str instructions for the thumb. */
7112 if (globals->use_rel)
7113 {
7114 /* Need to refetch addend. */
7115 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7116 /* ??? Need to determine shift amount from operand size. */
7117 addend >>= howto->rightshift;
7118 }
7119 value += addend;
7120
7121 /* ??? Isn't value unsigned? */
7122 if ((long) value > 0x1f || (long) value < -0x10)
7123 return bfd_reloc_overflow;
7124
7125 /* ??? Value needs to be properly shifted into place first. */
7126 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7127 bfd_put_16 (input_bfd, value, hit_data);
7128 return bfd_reloc_ok;
7129
7130 case R_ARM_THM_ALU_PREL_11_0:
7131 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
7132 {
7133 bfd_vma insn;
7134 bfd_signed_vma relocation;
7135
7136 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7137 | bfd_get_16 (input_bfd, hit_data + 2);
7138
7139 if (globals->use_rel)
7140 {
7141 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7142 | ((insn & (1 << 26)) >> 15);
7143 if (insn & 0xf00000)
7144 signed_addend = -signed_addend;
7145 }
7146
7147 relocation = value + signed_addend;
7148 relocation -= (input_section->output_section->vma
7149 + input_section->output_offset
7150 + rel->r_offset);
7151
7152 value = abs (relocation);
7153
7154 if (value >= 0x1000)
7155 return bfd_reloc_overflow;
7156
7157 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7158 | ((value & 0x700) << 4)
7159 | ((value & 0x800) << 15);
7160 if (relocation < 0)
7161 insn |= 0xa00000;
7162
7163 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7164 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7165
7166 return bfd_reloc_ok;
7167 }
7168
7169 case R_ARM_THM_PC12:
7170 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7171 {
7172 bfd_vma insn;
7173 bfd_signed_vma relocation;
7174
7175 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7176 | bfd_get_16 (input_bfd, hit_data + 2);
7177
7178 if (globals->use_rel)
7179 {
7180 signed_addend = insn & 0xfff;
7181 if (!(insn & (1 << 23)))
7182 signed_addend = -signed_addend;
7183 }
7184
7185 relocation = value + signed_addend;
7186 relocation -= (input_section->output_section->vma
7187 + input_section->output_offset
7188 + rel->r_offset);
7189
7190 value = abs (relocation);
7191
7192 if (value >= 0x1000)
7193 return bfd_reloc_overflow;
7194
7195 insn = (insn & 0xff7ff000) | value;
7196 if (relocation >= 0)
7197 insn |= (1 << 23);
7198
7199 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7200 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7201
7202 return bfd_reloc_ok;
7203 }
7204
7205 case R_ARM_THM_XPC22:
7206 case R_ARM_THM_CALL:
7207 case R_ARM_THM_JUMP24:
7208 /* Thumb BL (branch long instruction). */
7209 {
7210 bfd_vma relocation;
7211 bfd_vma reloc_sign;
7212 bfd_boolean overflow = FALSE;
7213 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7214 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7215 bfd_signed_vma reloc_signed_max;
7216 bfd_signed_vma reloc_signed_min;
7217 bfd_vma check;
7218 bfd_signed_vma signed_check;
7219 int bitsize;
7220 int thumb2 = using_thumb2 (globals);
7221
7222 /* A branch to an undefined weak symbol is turned into a jump to
7223 the next instruction unless a PLT entry will be created. */
7224 if (h && h->root.type == bfd_link_hash_undefweak
7225 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7226 {
7227 bfd_put_16 (input_bfd, 0xe000, hit_data);
7228 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7229 return bfd_reloc_ok;
7230 }
7231
7232 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7233 with Thumb-1) involving the J1 and J2 bits. */
7234 if (globals->use_rel)
7235 {
7236 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7237 bfd_vma upper = upper_insn & 0x3ff;
7238 bfd_vma lower = lower_insn & 0x7ff;
7239 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7240 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7241 bfd_vma i1 = j1 ^ s ? 0 : 1;
7242 bfd_vma i2 = j2 ^ s ? 0 : 1;
7243
7244 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7245 /* Sign extend. */
7246 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7247
7248 signed_addend = addend;
7249 }
7250
7251 if (r_type == R_ARM_THM_XPC22)
7252 {
7253 /* Check for Thumb to Thumb call. */
7254 /* FIXME: Should we translate the instruction into a BL
7255 instruction instead ? */
7256 if (sym_flags == STT_ARM_TFUNC)
7257 (*_bfd_error_handler)
7258 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7259 input_bfd,
7260 h ? h->root.root.string : "(local)");
7261 }
7262 else
7263 {
7264 /* If it is not a call to Thumb, assume call to Arm.
7265 If it is a call relative to a section name, then it is not a
7266 function call at all, but rather a long jump. Calls through
7267 the PLT do not require stubs. */
7268 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7269 && (h == NULL || splt == NULL
7270 || h->plt.offset == (bfd_vma) -1))
7271 {
7272 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7273 {
7274 /* Convert BL to BLX. */
7275 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7276 }
7277 else if (( r_type != R_ARM_THM_CALL)
7278 && (r_type != R_ARM_THM_JUMP24))
7279 {
7280 if (elf32_thumb_to_arm_stub
7281 (info, sym_name, input_bfd, output_bfd, input_section,
7282 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7283 error_message))
7284 return bfd_reloc_ok;
7285 else
7286 return bfd_reloc_dangerous;
7287 }
7288 }
7289 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7290 && r_type == R_ARM_THM_CALL)
7291 {
7292 /* Make sure this is a BL. */
7293 lower_insn |= 0x1800;
7294 }
7295 }
7296
7297 /* Handle calls via the PLT. */
7298 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7299 {
7300 value = (splt->output_section->vma
7301 + splt->output_offset
7302 + h->plt.offset);
7303 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7304 {
7305 /* If the Thumb BLX instruction is available, convert the
7306 BL to a BLX instruction to call the ARM-mode PLT entry. */
7307 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7308 }
7309 else
7310 /* Target the Thumb stub before the ARM PLT entry. */
7311 value -= PLT_THUMB_STUB_SIZE;
7312 *unresolved_reloc_p = FALSE;
7313 }
7314
7315 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7316 {
7317 /* Check if a stub has to be inserted because the destination
7318 is too far. */
7319 bfd_vma from;
7320 bfd_signed_vma branch_offset;
7321 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7322
7323 from = (input_section->output_section->vma
7324 + input_section->output_offset
7325 + rel->r_offset);
7326 branch_offset = (bfd_signed_vma)(value - from);
7327
7328 if ((!thumb2
7329 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7330 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7331 ||
7332 (thumb2
7333 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7334 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7335 || ((sym_flags != STT_ARM_TFUNC)
7336 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7337 || r_type == R_ARM_THM_JUMP24)))
7338 {
7339 /* The target is out of reach or we are changing modes, so
7340 redirect the branch to the local stub for this
7341 function. */
7342 stub_entry = elf32_arm_get_stub_entry (input_section,
7343 sym_sec, h,
7344 rel, globals);
7345 if (stub_entry != NULL)
7346 value = (stub_entry->stub_offset
7347 + stub_entry->stub_sec->output_offset
7348 + stub_entry->stub_sec->output_section->vma);
7349
7350 /* If this call becomes a call to Arm, force BLX. */
7351 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7352 {
7353 if ((stub_entry
7354 && !arm_stub_is_thumb (stub_entry->stub_type))
7355 || (sym_flags != STT_ARM_TFUNC))
7356 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7357 }
7358 }
7359 }
7360
7361 relocation = value + signed_addend;
7362
7363 relocation -= (input_section->output_section->vma
7364 + input_section->output_offset
7365 + rel->r_offset);
7366
7367 check = relocation >> howto->rightshift;
7368
7369 /* If this is a signed value, the rightshift just dropped
7370 leading 1 bits (assuming twos complement). */
7371 if ((bfd_signed_vma) relocation >= 0)
7372 signed_check = check;
7373 else
7374 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7375
7376 /* Calculate the permissible maximum and minimum values for
7377 this relocation according to whether we're relocating for
7378 Thumb-2 or not. */
7379 bitsize = howto->bitsize;
7380 if (!thumb2)
7381 bitsize -= 2;
7382 reloc_signed_max = ((1 << (bitsize - 1)) - 1) >> howto->rightshift;
7383 reloc_signed_min = ~reloc_signed_max;
7384
7385 /* Assumes two's complement. */
7386 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7387 overflow = TRUE;
7388
7389 if ((lower_insn & 0x5000) == 0x4000)
7390 /* For a BLX instruction, make sure that the relocation is rounded up
7391 to a word boundary. This follows the semantics of the instruction
7392 which specifies that bit 1 of the target address will come from bit
7393 1 of the base address. */
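/* For example (illustrative values), a computed offset of 0x1002
   becomes 0x1004 here, while 0x1004 is left unchanged.  */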
7394 relocation = (relocation + 2) & ~ 3;
7395
7396 /* Put RELOCATION back into the insn. Assumes two's complement.
7397 We use the Thumb-2 encoding, which is safe even if dealing with
7398 a Thumb-1 instruction by virtue of our overflow check above. */
7399 reloc_sign = (signed_check < 0) ? 1 : 0;
7400 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7401 | ((relocation >> 12) & 0x3ff)
7402 | (reloc_sign << 10);
7403 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7404 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7405 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7406 | ((relocation >> 1) & 0x7ff);
7407
7408 /* Put the relocated value back in the object file: */
7409 bfd_put_16 (input_bfd, upper_insn, hit_data);
7410 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7411
7412 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7413 }
7414 break;
7415
7416 case R_ARM_THM_JUMP19:
7417 /* Thumb32 conditional branch instruction. */
7418 {
7419 bfd_vma relocation;
7420 bfd_boolean overflow = FALSE;
7421 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7422 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7423 bfd_signed_vma reloc_signed_max = 0xffffe;
7424 bfd_signed_vma reloc_signed_min = -0x100000;
7425 bfd_signed_vma signed_check;
7426
7427 /* Need to refetch the addend, reconstruct the top three bits,
7428 and squish the 6-bit and 11-bit immediate pieces together. */
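/* The Thumb-2 conditional branch (B<cond>.W) splits its offset as
   S:J2:J1:imm6:imm11:'0' across the two halfwords, with no J-bit
   inversion (unlike BL), giving a +/-1MB range; hence the
   0xffffe / -0x100000 limits above.  */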
7429 if (globals->use_rel)
7430 {
7431 bfd_vma S = (upper_insn & 0x0400) >> 10;
7432 bfd_vma upper = (upper_insn & 0x003f);
7433 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7434 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7435 bfd_vma lower = (lower_insn & 0x07ff);
7436
7437 upper |= J1 << 6;
7438 upper |= J2 << 7;
7439 upper |= (!S) << 8;
7440 upper -= 0x0100; /* Sign extend. */
7441
7442 addend = (upper << 12) | (lower << 1);
7443 signed_addend = addend;
7444 }
7445
7446 /* Handle calls via the PLT. */
7447 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7448 {
7449 value = (splt->output_section->vma
7450 + splt->output_offset
7451 + h->plt.offset);
7452 /* Target the Thumb stub before the ARM PLT entry. */
7453 value -= PLT_THUMB_STUB_SIZE;
7454 *unresolved_reloc_p = FALSE;
7455 }
7456
7457 /* ??? Should handle interworking? GCC might someday try to
7458 use this for tail calls. */
7459
7460 relocation = value + signed_addend;
7461 relocation -= (input_section->output_section->vma
7462 + input_section->output_offset
7463 + rel->r_offset);
7464 signed_check = (bfd_signed_vma) relocation;
7465
7466 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7467 overflow = TRUE;
7468
7469 /* Put RELOCATION back into the insn. */
7470 {
7471 bfd_vma S = (relocation & 0x00100000) >> 20;
7472 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7473 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7474 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7475 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7476
7477 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7478 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7479 }
7480
7481 /* Put the relocated value back in the object file: */
7482 bfd_put_16 (input_bfd, upper_insn, hit_data);
7483 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7484
7485 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7486 }
7487
7488 case R_ARM_THM_JUMP11:
7489 case R_ARM_THM_JUMP8:
7490 case R_ARM_THM_JUMP6:
7491 /* Thumb B (branch) instruction. */
7492 {
7493 bfd_signed_vma relocation;
7494 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7495 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7496 bfd_signed_vma signed_check;
7497
7498 /* CZB cannot jump backward. */
7499 if (r_type == R_ARM_THM_JUMP6)
7500 reloc_signed_min = 0;
7501
7502 if (globals->use_rel)
7503 {
7504 /* Need to refetch addend. */
7505 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7506 if (addend & ((howto->src_mask + 1) >> 1))
7507 {
7508 signed_addend = -1;
7509 signed_addend &= ~ howto->src_mask;
7510 signed_addend |= addend;
7511 }
7512 else
7513 signed_addend = addend;
7514 /* The value in the insn has been right shifted. We need to
7515 undo this, so that we can perform the address calculation
7516 in terms of bytes. */
7517 signed_addend <<= howto->rightshift;
7518 }
7519 relocation = value + signed_addend;
7520
7521 relocation -= (input_section->output_section->vma
7522 + input_section->output_offset
7523 + rel->r_offset);
7524
7525 relocation >>= howto->rightshift;
7526 signed_check = relocation;
7527
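/* For R_ARM_THM_JUMP6 the branch is a CBZ/CBNZ, whose offset field is
   split as i:imm5 (bit 9 and bits 3-7 of the instruction); the shifts
   below place the already right-shifted offset into those fields.  */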
7528 if (r_type == R_ARM_THM_JUMP6)
7529 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7530 else
7531 relocation &= howto->dst_mask;
7532 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7533
7534 bfd_put_16 (input_bfd, relocation, hit_data);
7535
7536 /* Assumes two's complement. */
7537 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7538 return bfd_reloc_overflow;
7539
7540 return bfd_reloc_ok;
7541 }
7542
7543 case R_ARM_ALU_PCREL7_0:
7544 case R_ARM_ALU_PCREL15_8:
7545 case R_ARM_ALU_PCREL23_15:
7546 {
7547 bfd_vma insn;
7548 bfd_vma relocation;
7549
7550 insn = bfd_get_32 (input_bfd, hit_data);
7551 if (globals->use_rel)
7552 {
7553 /* Extract the addend. */
7554 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7555 signed_addend = addend;
7556 }
7557 relocation = value + signed_addend;
7558
7559 relocation -= (input_section->output_section->vma
7560 + input_section->output_offset
7561 + rel->r_offset);
7562 insn = (insn & ~0xfff)
7563 | ((howto->bitpos << 7) & 0xf00)
7564 | ((relocation >> howto->bitpos) & 0xff);
7565 bfd_put_32 (input_bfd, insn, hit_data);
7566 }
7567 return bfd_reloc_ok;
7568
7569 case R_ARM_GNU_VTINHERIT:
7570 case R_ARM_GNU_VTENTRY:
7571 return bfd_reloc_ok;
7572
7573 case R_ARM_GOTOFF32:
7574 /* Relocation is relative to the start of the
7575 global offset table. */
7576
7577 BFD_ASSERT (sgot != NULL);
7578 if (sgot == NULL)
7579 return bfd_reloc_notsupported;
7580
7581 /* If we are addressing a Thumb function, we need to adjust the
7582 address by one, so that attempts to call the function pointer will
7583 correctly interpret it as Thumb code. */
7584 if (sym_flags == STT_ARM_TFUNC)
7585 value += 1;
7586
7587 /* Note that sgot->output_offset is not involved in this
7588 calculation. We always want the start of .got. If we
7589 define _GLOBAL_OFFSET_TABLE in a different way, as is
7590 permitted by the ABI, we might have to change this
7591 calculation. */
7592 value -= sgot->output_section->vma;
7593 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7594 contents, rel->r_offset, value,
7595 rel->r_addend);
7596
7597 case R_ARM_GOTPC:
7598 /* Use global offset table as symbol value. */
7599 BFD_ASSERT (sgot != NULL);
7600
7601 if (sgot == NULL)
7602 return bfd_reloc_notsupported;
7603
7604 *unresolved_reloc_p = FALSE;
7605 value = sgot->output_section->vma;
7606 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7607 contents, rel->r_offset, value,
7608 rel->r_addend);
7609
7610 case R_ARM_GOT32:
7611 case R_ARM_GOT_PREL:
7612 /* Relocation is to the entry for this symbol in the
7613 global offset table. */
7614 if (sgot == NULL)
7615 return bfd_reloc_notsupported;
7616
7617 if (h != NULL)
7618 {
7619 bfd_vma off;
7620 bfd_boolean dyn;
7621
7622 off = h->got.offset;
7623 BFD_ASSERT (off != (bfd_vma) -1);
7624 dyn = globals->root.dynamic_sections_created;
7625
7626 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7627 || (info->shared
7628 && SYMBOL_REFERENCES_LOCAL (info, h))
7629 || (ELF_ST_VISIBILITY (h->other)
7630 && h->root.type == bfd_link_hash_undefweak))
7631 {
7632 /* This is actually a static link, or it is a -Bsymbolic link
7633 and the symbol is defined locally. We must initialize this
7634 entry in the global offset table. Since the offset must
7635 always be a multiple of 4, we use the least significant bit
7636 to record whether we have initialized it already.
7637
7638 When doing a dynamic link, we create a .rel(a).got relocation
7639 entry to initialize the value. This is done in the
7640 finish_dynamic_symbol routine. */
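/* For example, a GOT offset of 0x10 is stored as 0x11 once the entry
   has been written; masking with ~1 below recovers the real offset.  */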
7641 if ((off & 1) != 0)
7642 off &= ~1;
7643 else
7644 {
7645 /* If we are addressing a Thumb function, we need to
7646 adjust the address by one, so that attempts to
7647 call the function pointer will correctly
7648 interpret it as Thumb code. */
7649 if (sym_flags == STT_ARM_TFUNC)
7650 value |= 1;
7651
7652 bfd_put_32 (output_bfd, value, sgot->contents + off);
7653 h->got.offset |= 1;
7654 }
7655 }
7656 else
7657 *unresolved_reloc_p = FALSE;
7658
7659 value = sgot->output_offset + off;
7660 }
7661 else
7662 {
7663 bfd_vma off;
7664
7665 BFD_ASSERT (local_got_offsets != NULL &&
7666 local_got_offsets[r_symndx] != (bfd_vma) -1);
7667
7668 off = local_got_offsets[r_symndx];
7669
7670 /* The offset must always be a multiple of 4. We use the
7671 least significant bit to record whether we have already
7672 generated the necessary reloc. */
7673 if ((off & 1) != 0)
7674 off &= ~1;
7675 else
7676 {
7677 /* If we are addressing a Thumb function, we need to
7678 adjust the address by one, so that attempts to
7679 call the function pointer will correctly
7680 interpret it as Thumb code. */
7681 if (sym_flags == STT_ARM_TFUNC)
7682 value |= 1;
7683
7684 if (globals->use_rel)
7685 bfd_put_32 (output_bfd, value, sgot->contents + off);
7686
7687 if (info->shared)
7688 {
7689 asection * srelgot;
7690 Elf_Internal_Rela outrel;
7691 bfd_byte *loc;
7692
7693 srelgot = (bfd_get_section_by_name
7694 (dynobj, RELOC_SECTION (globals, ".got")));
7695 BFD_ASSERT (srelgot != NULL);
7696
7697 outrel.r_addend = addend + value;
7698 outrel.r_offset = (sgot->output_section->vma
7699 + sgot->output_offset
7700 + off);
7701 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7702 loc = srelgot->contents;
7703 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7704 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7705 }
7706
7707 local_got_offsets[r_symndx] |= 1;
7708 }
7709
7710 value = sgot->output_offset + off;
7711 }
7712 if (r_type != R_ARM_GOT32)
7713 value += sgot->output_section->vma;
7714
7715 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7716 contents, rel->r_offset, value,
7717 rel->r_addend);
7718
7719 case R_ARM_TLS_LDO32:
7720 value = value - dtpoff_base (info);
7721
7722 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7723 contents, rel->r_offset, value,
7724 rel->r_addend);
7725
7726 case R_ARM_TLS_LDM32:
7727 {
7728 bfd_vma off;
7729
7730 if (globals->sgot == NULL)
7731 abort ();
7732
7733 off = globals->tls_ldm_got.offset;
7734
7735 if ((off & 1) != 0)
7736 off &= ~1;
7737 else
7738 {
7739 /* If we don't know the module number, create a relocation
7740 for it. */
7741 if (info->shared)
7742 {
7743 Elf_Internal_Rela outrel;
7744 bfd_byte *loc;
7745
7746 if (globals->srelgot == NULL)
7747 abort ();
7748
7749 outrel.r_addend = 0;
7750 outrel.r_offset = (globals->sgot->output_section->vma
7751 + globals->sgot->output_offset + off);
7752 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7753
7754 if (globals->use_rel)
7755 bfd_put_32 (output_bfd, outrel.r_addend,
7756 globals->sgot->contents + off);
7757
7758 loc = globals->srelgot->contents;
7759 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7760 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7761 }
7762 else
7763 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7764
7765 globals->tls_ldm_got.offset |= 1;
7766 }
7767
7768 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7769 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7770
7771 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7772 contents, rel->r_offset, value,
7773 rel->r_addend);
7774 }
7775
7776 case R_ARM_TLS_GD32:
7777 case R_ARM_TLS_IE32:
7778 {
7779 bfd_vma off;
7780 int indx;
7781 char tls_type;
7782
7783 if (globals->sgot == NULL)
7784 abort ();
7785
7786 indx = 0;
7787 if (h != NULL)
7788 {
7789 bfd_boolean dyn;
7790 dyn = globals->root.dynamic_sections_created;
7791 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7792 && (!info->shared
7793 || !SYMBOL_REFERENCES_LOCAL (info, h)))
7794 {
7795 *unresolved_reloc_p = FALSE;
7796 indx = h->dynindx;
7797 }
7798 off = h->got.offset;
7799 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
7800 }
7801 else
7802 {
7803 if (local_got_offsets == NULL)
7804 abort ();
7805 off = local_got_offsets[r_symndx];
7806 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
7807 }
7808
7809 if (tls_type == GOT_UNKNOWN)
7810 abort ();
7811
7812 if ((off & 1) != 0)
7813 off &= ~1;
7814 else
7815 {
7816 bfd_boolean need_relocs = FALSE;
7817 Elf_Internal_Rela outrel;
7818 bfd_byte *loc = NULL;
7819 int cur_off = off;
7820
7821 /* The GOT entries have not been initialized yet. Do it
7822 now, and emit any relocations. If both an IE GOT and a
7823 GD GOT are necessary, we emit the GD first. */
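/* A GD entry uses two GOT words (the module ID, then the DTP-relative
   offset at +4) while an IE entry uses a single TP-relative word, so a
   symbol needing both occupies 12 bytes with the IE word at off + 8
   (see the adjustment of OFF further down).  */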
7824
7825 if ((info->shared || indx != 0)
7826 && (h == NULL
7827 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7828 || h->root.type != bfd_link_hash_undefweak))
7829 {
7830 need_relocs = TRUE;
7831 if (globals->srelgot == NULL)
7832 abort ();
7833 loc = globals->srelgot->contents;
7834 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
7835 }
7836
7837 if (tls_type & GOT_TLS_GD)
7838 {
7839 if (need_relocs)
7840 {
7841 outrel.r_addend = 0;
7842 outrel.r_offset = (globals->sgot->output_section->vma
7843 + globals->sgot->output_offset
7844 + cur_off);
7845 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
7846
7847 if (globals->use_rel)
7848 bfd_put_32 (output_bfd, outrel.r_addend,
7849 globals->sgot->contents + cur_off);
7850
7851 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7852 globals->srelgot->reloc_count++;
7853 loc += RELOC_SIZE (globals);
7854
7855 if (indx == 0)
7856 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7857 globals->sgot->contents + cur_off + 4);
7858 else
7859 {
7860 outrel.r_addend = 0;
7861 outrel.r_info = ELF32_R_INFO (indx,
7862 R_ARM_TLS_DTPOFF32);
7863 outrel.r_offset += 4;
7864
7865 if (globals->use_rel)
7866 bfd_put_32 (output_bfd, outrel.r_addend,
7867 globals->sgot->contents + cur_off + 4);
7868
7869
7870 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7871 globals->srelgot->reloc_count++;
7872 loc += RELOC_SIZE (globals);
7873 }
7874 }
7875 else
7876 {
7877 /* If we are not emitting relocations for a
7878 general dynamic reference, then we must be in a
7879 static link or an executable link with the
7880 symbol binding locally. Mark it as belonging
7881 to module 1, the executable. */
7882 bfd_put_32 (output_bfd, 1,
7883 globals->sgot->contents + cur_off);
7884 bfd_put_32 (output_bfd, value - dtpoff_base (info),
7885 globals->sgot->contents + cur_off + 4);
7886 }
7887
7888 cur_off += 8;
7889 }
7890
7891 if (tls_type & GOT_TLS_IE)
7892 {
7893 if (need_relocs)
7894 {
7895 if (indx == 0)
7896 outrel.r_addend = value - dtpoff_base (info);
7897 else
7898 outrel.r_addend = 0;
7899 outrel.r_offset = (globals->sgot->output_section->vma
7900 + globals->sgot->output_offset
7901 + cur_off);
7902 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
7903
7904 if (globals->use_rel)
7905 bfd_put_32 (output_bfd, outrel.r_addend,
7906 globals->sgot->contents + cur_off);
7907
7908 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7909 globals->srelgot->reloc_count++;
7910 loc += RELOC_SIZE (globals);
7911 }
7912 else
7913 bfd_put_32 (output_bfd, tpoff (info, value),
7914 globals->sgot->contents + cur_off);
7915 cur_off += 4;
7916 }
7917
7918 if (h != NULL)
7919 h->got.offset |= 1;
7920 else
7921 local_got_offsets[r_symndx] |= 1;
7922 }
7923
7924 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
7925 off += 8;
7926 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7927 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7928
7929 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7930 contents, rel->r_offset, value,
7931 rel->r_addend);
7932 }
7933
7934 case R_ARM_TLS_LE32:
7935 if (info->shared)
7936 {
7937 (*_bfd_error_handler)
7938 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
7939 input_bfd, input_section,
7940 (long) rel->r_offset, howto->name);
7941 return FALSE;
7942 }
7943 else
7944 value = tpoff (info, value);
7945
7946 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7947 contents, rel->r_offset, value,
7948 rel->r_addend);
7949
7950 case R_ARM_V4BX:
7951 if (globals->fix_v4bx)
7952 {
7953 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7954
7955 /* Ensure that we have a BX instruction. */
7956 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
7957
7958 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
7959 {
7960 /* Branch to veneer. */
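/* The veneer is reached with an ARM "B<cond>": the extra 8 subtracted
   below accounts for the ARM PC reading as the instruction address
   plus 8, and the rewritten instruction keeps the original condition
   in the top nibble, sets the B opcode (0x0a000000) and encodes a
   signed 24-bit word offset.  */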
7961 bfd_vma glue_addr;
7962 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
7963 glue_addr -= input_section->output_section->vma
7964 + input_section->output_offset
7965 + rel->r_offset + 8;
7966 insn = (insn & 0xf0000000) | 0x0a000000
7967 | ((glue_addr >> 2) & 0x00ffffff);
7968 }
7969 else
7970 {
7971 /* Preserve Rm (lowest four bits) and the condition code
7972 (highest four bits). Other bits encode MOV PC,Rm. */
7973 insn = (insn & 0xf000000f) | 0x01a0f000;
7974 }
7975
7976 bfd_put_32 (input_bfd, insn, hit_data);
7977 }
7978 return bfd_reloc_ok;
7979
7980 case R_ARM_MOVW_ABS_NC:
7981 case R_ARM_MOVT_ABS:
7982 case R_ARM_MOVW_PREL_NC:
7983 case R_ARM_MOVT_PREL:
7984 /* Until we properly support segment-base-relative addressing, we
7985 assume the segment base to be zero, as for the group relocations.
7986 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
7987 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
7988 case R_ARM_MOVW_BREL_NC:
7989 case R_ARM_MOVW_BREL:
7990 case R_ARM_MOVT_BREL:
7991 {
7992 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
7993
7994 if (globals->use_rel)
7995 {
7996 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7997 signed_addend = (addend ^ 0x8000) - 0x8000;
7998 }
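/* ARM MOVW/MOVT hold their 16-bit immediate as imm4:imm12 (bits 16-19
   and 0-11 of the instruction); the extraction above reassembles it
   and (addend ^ 0x8000) - 0x8000 sign-extends the 16-bit result, e.g.
   0xfffc becomes -4.  The same split is used when the value is
   re-inserted below.  */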
7999
8000 value += signed_addend;
8001
8002 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8003 value -= (input_section->output_section->vma
8004 + input_section->output_offset + rel->r_offset);
8005
8006 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8007 return bfd_reloc_overflow;
8008
8009 if (sym_flags == STT_ARM_TFUNC)
8010 value |= 1;
8011
8012 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8013 || r_type == R_ARM_MOVT_BREL)
8014 value >>= 16;
8015
8016 insn &= 0xfff0f000;
8017 insn |= value & 0xfff;
8018 insn |= (value & 0xf000) << 4;
8019 bfd_put_32 (input_bfd, insn, hit_data);
8020 }
8021 return bfd_reloc_ok;
8022
8023 case R_ARM_THM_MOVW_ABS_NC:
8024 case R_ARM_THM_MOVT_ABS:
8025 case R_ARM_THM_MOVW_PREL_NC:
8026 case R_ARM_THM_MOVT_PREL:
8027 /* Until we properly support segment-base-relative addressing, we
8028 assume the segment base to be zero, as for the above relocations.
8029 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8030 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8031 as R_ARM_THM_MOVT_ABS. */
8032 case R_ARM_THM_MOVW_BREL_NC:
8033 case R_ARM_THM_MOVW_BREL:
8034 case R_ARM_THM_MOVT_BREL:
8035 {
8036 bfd_vma insn;
8037
8038 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8039 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8040
8041 if (globals->use_rel)
8042 {
8043 addend = ((insn >> 4) & 0xf000)
8044 | ((insn >> 15) & 0x0800)
8045 | ((insn >> 4) & 0x0700)
8046 | (insn & 0x00ff);
8047 signed_addend = (addend ^ 0x8000) - 0x8000;
8048 }
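/* The Thumb-2 MOVW/MOVT immediate is split as imm4:i:imm3:imm8
   (instruction bits 16-19, 26, 12-14 and 0-7 respectively); the
   extraction above and the re-encoding below both follow that layout,
   with the usual 16-bit sign-extension trick for the REL addend.  */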
8049
8050 value += signed_addend;
8051
8052 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8053 value -= (input_section->output_section->vma
8054 + input_section->output_offset + rel->r_offset);
8055
8056 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8057 return bfd_reloc_overflow;
8058
8059 if (sym_flags == STT_ARM_TFUNC)
8060 value |= 1;
8061
8062 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8063 || r_type == R_ARM_THM_MOVT_BREL)
8064 value >>= 16;
8065
8066 insn &= 0xfbf08f00;
8067 insn |= (value & 0xf000) << 4;
8068 insn |= (value & 0x0800) << 15;
8069 insn |= (value & 0x0700) << 4;
8070 insn |= (value & 0x00ff);
8071
8072 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8073 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8074 }
8075 return bfd_reloc_ok;
8076
8077 case R_ARM_ALU_PC_G0_NC:
8078 case R_ARM_ALU_PC_G1_NC:
8079 case R_ARM_ALU_PC_G0:
8080 case R_ARM_ALU_PC_G1:
8081 case R_ARM_ALU_PC_G2:
8082 case R_ARM_ALU_SB_G0_NC:
8083 case R_ARM_ALU_SB_G1_NC:
8084 case R_ARM_ALU_SB_G0:
8085 case R_ARM_ALU_SB_G1:
8086 case R_ARM_ALU_SB_G2:
8087 {
8088 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8089 bfd_vma pc = input_section->output_section->vma
8090 + input_section->output_offset + rel->r_offset;
8091 /* sb should be the origin of the *segment* containing the symbol.
8092 It is not clear how to obtain this OS-dependent value, so we
8093 make an arbitrary choice of zero. */
8094 bfd_vma sb = 0;
8095 bfd_vma residual;
8096 bfd_vma g_n;
8097 bfd_signed_vma signed_value;
8098 int group = 0;
8099
8100 /* Determine which group of bits to select. */
8101 switch (r_type)
8102 {
8103 case R_ARM_ALU_PC_G0_NC:
8104 case R_ARM_ALU_PC_G0:
8105 case R_ARM_ALU_SB_G0_NC:
8106 case R_ARM_ALU_SB_G0:
8107 group = 0;
8108 break;
8109
8110 case R_ARM_ALU_PC_G1_NC:
8111 case R_ARM_ALU_PC_G1:
8112 case R_ARM_ALU_SB_G1_NC:
8113 case R_ARM_ALU_SB_G1:
8114 group = 1;
8115 break;
8116
8117 case R_ARM_ALU_PC_G2:
8118 case R_ARM_ALU_SB_G2:
8119 group = 2;
8120 break;
8121
8122 default:
8123 abort ();
8124 }
8125
8126 /* If REL, extract the addend from the insn. If RELA, it will
8127 have already been fetched for us. */
8128 if (globals->use_rel)
8129 {
8130 int negative;
8131 bfd_vma constant = insn & 0xff;
8132 bfd_vma rotation = (insn & 0xf00) >> 8;
8133
8134 if (rotation == 0)
8135 signed_addend = constant;
8136 else
8137 {
8138 /* Compensate for the fact that in the instruction, the
8139 rotation is stored in multiples of 2 bits. */
8140 rotation *= 2;
8141
8142 /* Rotate "constant" right by "rotation" bits. */
8143 signed_addend = (constant >> rotation) |
8144 (constant << (8 * sizeof (bfd_vma) - rotation));
8145 }
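/* This reverses the ARM "modified immediate" encoding: an 8-bit
   constant rotated right by twice the 4-bit rotation field.  As an
   illustration, imm8 0xc0 with a rotation field of 2 (a rotate right
   by 4) decodes to 0x0c.  */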
8146
8147 /* Determine if the instruction is an ADD or a SUB.
8148 (For REL, this determines the sign of the addend.) */
8149 negative = identify_add_or_sub (insn);
8150 if (negative == 0)
8151 {
8152 (*_bfd_error_handler)
8153 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8154 input_bfd, input_section,
8155 (long) rel->r_offset, howto->name);
8156 return bfd_reloc_overflow;
8157 }
8158
8159 signed_addend *= negative;
8160 }
8161
8162 /* Compute the value (X) to go in the place. */
8163 if (r_type == R_ARM_ALU_PC_G0_NC
8164 || r_type == R_ARM_ALU_PC_G1_NC
8165 || r_type == R_ARM_ALU_PC_G0
8166 || r_type == R_ARM_ALU_PC_G1
8167 || r_type == R_ARM_ALU_PC_G2)
8168 /* PC relative. */
8169 signed_value = value - pc + signed_addend;
8170 else
8171 /* Section base relative. */
8172 signed_value = value - sb + signed_addend;
8173
8174 /* If the target symbol is a Thumb function, then set the
8175 Thumb bit in the address. */
8176 if (sym_flags == STT_ARM_TFUNC)
8177 signed_value |= 1;
8178
8179 /* Calculate the value of the relevant G_n, in encoded
8180 constant-with-rotation format. */
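/* Roughly, the ABI splits the value into 8-bit chunks, each aligned so
   that it can be expressed as an ARM rotated immediate: G_0 is the most
   significant chunk, G_1 the next, and so on, with whatever is left
   after the requested group forming the residual.  As an illustration,
   0x00aabb01 would split into 0xaa0000, 0xbb00 and 0x01 with a zero
   residual.  */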
8181 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8182 &residual);
8183
8184 /* Check for overflow if required. */
8185 if ((r_type == R_ARM_ALU_PC_G0
8186 || r_type == R_ARM_ALU_PC_G1
8187 || r_type == R_ARM_ALU_PC_G2
8188 || r_type == R_ARM_ALU_SB_G0
8189 || r_type == R_ARM_ALU_SB_G1
8190 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8191 {
8192 (*_bfd_error_handler)
8193 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8194 input_bfd, input_section,
8195 (long) rel->r_offset, abs (signed_value), howto->name);
8196 return bfd_reloc_overflow;
8197 }
8198
8199 /* Mask out the value and the ADD/SUB part of the opcode; take care
8200 not to destroy the S bit. */
8201 insn &= 0xff1ff000;
8202
8203 /* Set the opcode according to whether the value to go in the
8204 place is negative. */
8205 if (signed_value < 0)
8206 insn |= 1 << 22;
8207 else
8208 insn |= 1 << 23;
8209
8210 /* Encode the offset. */
8211 insn |= g_n;
8212
8213 bfd_put_32 (input_bfd, insn, hit_data);
8214 }
8215 return bfd_reloc_ok;
8216
8217 case R_ARM_LDR_PC_G0:
8218 case R_ARM_LDR_PC_G1:
8219 case R_ARM_LDR_PC_G2:
8220 case R_ARM_LDR_SB_G0:
8221 case R_ARM_LDR_SB_G1:
8222 case R_ARM_LDR_SB_G2:
8223 {
8224 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8225 bfd_vma pc = input_section->output_section->vma
8226 + input_section->output_offset + rel->r_offset;
8227 bfd_vma sb = 0; /* See note above. */
8228 bfd_vma residual;
8229 bfd_signed_vma signed_value;
8230 int group = 0;
8231
8232 /* Determine which groups of bits to calculate. */
8233 switch (r_type)
8234 {
8235 case R_ARM_LDR_PC_G0:
8236 case R_ARM_LDR_SB_G0:
8237 group = 0;
8238 break;
8239
8240 case R_ARM_LDR_PC_G1:
8241 case R_ARM_LDR_SB_G1:
8242 group = 1;
8243 break;
8244
8245 case R_ARM_LDR_PC_G2:
8246 case R_ARM_LDR_SB_G2:
8247 group = 2;
8248 break;
8249
8250 default:
8251 abort ();
8252 }
8253
8254 /* If REL, extract the addend from the insn. If RELA, it will
8255 have already been fetched for us. */
8256 if (globals->use_rel)
8257 {
8258 int negative = (insn & (1 << 23)) ? 1 : -1;
8259 signed_addend = negative * (insn & 0xfff);
8260 }
8261
8262 /* Compute the value (X) to go in the place. */
8263 if (r_type == R_ARM_LDR_PC_G0
8264 || r_type == R_ARM_LDR_PC_G1
8265 || r_type == R_ARM_LDR_PC_G2)
8266 /* PC relative. */
8267 signed_value = value - pc + signed_addend;
8268 else
8269 /* Section base relative. */
8270 signed_value = value - sb + signed_addend;
8271
8272 /* Calculate the value of the relevant G_{n-1} to obtain
8273 the residual at that stage. */
8274 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8275
8276 /* Check for overflow. */
8277 if (residual >= 0x1000)
8278 {
8279 (*_bfd_error_handler)
8280 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8281 input_bfd, input_section,
8282 (long) rel->r_offset, abs (signed_value), howto->name);
8283 return bfd_reloc_overflow;
8284 }
8285
8286 /* Mask out the value and U bit. */
8287 insn &= 0xff7ff000;
8288
8289 /* Set the U bit if the value to go in the place is non-negative. */
8290 if (signed_value >= 0)
8291 insn |= 1 << 23;
8292
8293 /* Encode the offset. */
8294 insn |= residual;
8295
8296 bfd_put_32 (input_bfd, insn, hit_data);
8297 }
8298 return bfd_reloc_ok;
8299
8300 case R_ARM_LDRS_PC_G0:
8301 case R_ARM_LDRS_PC_G1:
8302 case R_ARM_LDRS_PC_G2:
8303 case R_ARM_LDRS_SB_G0:
8304 case R_ARM_LDRS_SB_G1:
8305 case R_ARM_LDRS_SB_G2:
8306 {
8307 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8308 bfd_vma pc = input_section->output_section->vma
8309 + input_section->output_offset + rel->r_offset;
8310 bfd_vma sb = 0; /* See note above. */
8311 bfd_vma residual;
8312 bfd_signed_vma signed_value;
8313 int group = 0;
8314
8315 /* Determine which groups of bits to calculate. */
8316 switch (r_type)
8317 {
8318 case R_ARM_LDRS_PC_G0:
8319 case R_ARM_LDRS_SB_G0:
8320 group = 0;
8321 break;
8322
8323 case R_ARM_LDRS_PC_G1:
8324 case R_ARM_LDRS_SB_G1:
8325 group = 1;
8326 break;
8327
8328 case R_ARM_LDRS_PC_G2:
8329 case R_ARM_LDRS_SB_G2:
8330 group = 2;
8331 break;
8332
8333 default:
8334 abort ();
8335 }
8336
8337 /* If REL, extract the addend from the insn. If RELA, it will
8338 have already been fetched for us. */
8339 if (globals->use_rel)
8340 {
8341 int negative = (insn & (1 << 23)) ? 1 : -1;
8342 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8343 }
8344
8345 /* Compute the value (X) to go in the place. */
8346 if (r_type == R_ARM_LDRS_PC_G0
8347 || r_type == R_ARM_LDRS_PC_G1
8348 || r_type == R_ARM_LDRS_PC_G2)
8349 /* PC relative. */
8350 signed_value = value - pc + signed_addend;
8351 else
8352 /* Section base relative. */
8353 signed_value = value - sb + signed_addend;
8354
8355 /* Calculate the value of the relevant G_{n-1} to obtain
8356 the residual at that stage. */
8357 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8358
8359 /* Check for overflow. */
8360 if (residual >= 0x100)
8361 {
8362 (*_bfd_error_handler)
8363 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8364 input_bfd, input_section,
8365 (long) rel->r_offset, abs (signed_value), howto->name);
8366 return bfd_reloc_overflow;
8367 }
8368
8369 /* Mask out the value and U bit. */
8370 insn &= 0xff7ff0f0;
8371
8372 /* Set the U bit if the value to go in the place is non-negative. */
8373 if (signed_value >= 0)
8374 insn |= 1 << 23;
8375
8376 /* Encode the offset. */
8377 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8378
8379 bfd_put_32 (input_bfd, insn, hit_data);
8380 }
8381 return bfd_reloc_ok;
8382
8383 case R_ARM_LDC_PC_G0:
8384 case R_ARM_LDC_PC_G1:
8385 case R_ARM_LDC_PC_G2:
8386 case R_ARM_LDC_SB_G0:
8387 case R_ARM_LDC_SB_G1:
8388 case R_ARM_LDC_SB_G2:
8389 {
8390 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8391 bfd_vma pc = input_section->output_section->vma
8392 + input_section->output_offset + rel->r_offset;
8393 bfd_vma sb = 0; /* See note above. */
8394 bfd_vma residual;
8395 bfd_signed_vma signed_value;
8396 int group = 0;
8397
8398 /* Determine which groups of bits to calculate. */
8399 switch (r_type)
8400 {
8401 case R_ARM_LDC_PC_G0:
8402 case R_ARM_LDC_SB_G0:
8403 group = 0;
8404 break;
8405
8406 case R_ARM_LDC_PC_G1:
8407 case R_ARM_LDC_SB_G1:
8408 group = 1;
8409 break;
8410
8411 case R_ARM_LDC_PC_G2:
8412 case R_ARM_LDC_SB_G2:
8413 group = 2;
8414 break;
8415
8416 default:
8417 abort ();
8418 }
8419
8420 /* If REL, extract the addend from the insn. If RELA, it will
8421 have already been fetched for us. */
8422 if (globals->use_rel)
8423 {
8424 int negative = (insn & (1 << 23)) ? 1 : -1;
8425 signed_addend = negative * ((insn & 0xff) << 2);
8426 }
8427
8428 /* Compute the value (X) to go in the place. */
8429 if (r_type == R_ARM_LDC_PC_G0
8430 || r_type == R_ARM_LDC_PC_G1
8431 || r_type == R_ARM_LDC_PC_G2)
8432 /* PC relative. */
8433 signed_value = value - pc + signed_addend;
8434 else
8435 /* Section base relative. */
8436 signed_value = value - sb + signed_addend;
8437
8438 /* Calculate the value of the relevant G_{n-1} to obtain
8439 the residual at that stage. */
8440 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8441
8442 /* Check for overflow. (The absolute value to go in the place must be
8443 divisible by four and, after having been divided by four, must
8444 fit in eight bits.) */
8445 if ((residual & 0x3) != 0 || residual >= 0x400)
8446 {
8447 (*_bfd_error_handler)
8448 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8449 input_bfd, input_section,
8450 (long) rel->r_offset, abs (signed_value), howto->name);
8451 return bfd_reloc_overflow;
8452 }
8453
8454 /* Mask out the value and U bit. */
8455 insn &= 0xff7fff00;
8456
8457 /* Set the U bit if the value to go in the place is non-negative. */
8458 if (signed_value >= 0)
8459 insn |= 1 << 23;
8460
8461 /* Encode the offset. */
8462 insn |= residual >> 2;
8463
8464 bfd_put_32 (input_bfd, insn, hit_data);
8465 }
8466 return bfd_reloc_ok;
8467
8468 default:
8469 return bfd_reloc_notsupported;
8470 }
8471 }
8472
8473 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8474 static void
8475 arm_add_to_rel (bfd * abfd,
8476 bfd_byte * address,
8477 reloc_howto_type * howto,
8478 bfd_signed_vma increment)
8479 {
8480 bfd_signed_vma addend;
8481
8482 if (howto->type == R_ARM_THM_CALL
8483 || howto->type == R_ARM_THM_JUMP24)
8484 {
8485 int upper_insn, lower_insn;
8486 int upper, lower;
8487
8488 upper_insn = bfd_get_16 (abfd, address);
8489 lower_insn = bfd_get_16 (abfd, address + 2);
8490 upper = upper_insn & 0x7ff;
8491 lower = lower_insn & 0x7ff;
8492
8493 addend = (upper << 12) | (lower << 1);
8494 addend += increment;
8495 addend >>= 1;
8496
8497 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8498 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8499
8500 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8501 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8502 }
8503 else
8504 {
8505 bfd_vma contents;
8506
8507 contents = bfd_get_32 (abfd, address);
8508
8509 /* Get the (signed) value from the instruction. */
8510 addend = contents & howto->src_mask;
8511 if (addend & ((howto->src_mask + 1) >> 1))
8512 {
8513 bfd_signed_vma mask;
8514
8515 mask = -1;
8516 mask &= ~ howto->src_mask;
8517 addend |= mask;
8518 }
8519
8520 /* Add in the increment, (which is a byte value). */
8521 switch (howto->type)
8522 {
8523 default:
8524 addend += increment;
8525 break;
8526
8527 case R_ARM_PC24:
8528 case R_ARM_PLT32:
8529 case R_ARM_CALL:
8530 case R_ARM_JUMP24:
8531 addend <<= howto->size;
8532 addend += increment;
8533
8534 /* Should we check for overflow here ? */
8535
8536 /* Drop any undesired bits. */
8537 addend >>= howto->rightshift;
8538 break;
8539 }
8540
8541 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8542
8543 bfd_put_32 (abfd, contents, address);
8544 }
8545 }
8546
8547 #define IS_ARM_TLS_RELOC(R_TYPE) \
8548 ((R_TYPE) == R_ARM_TLS_GD32 \
8549 || (R_TYPE) == R_ARM_TLS_LDO32 \
8550 || (R_TYPE) == R_ARM_TLS_LDM32 \
8551 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8552 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8553 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8554 || (R_TYPE) == R_ARM_TLS_LE32 \
8555 || (R_TYPE) == R_ARM_TLS_IE32)
8556
8557 /* Relocate an ARM ELF section. */
8558
8559 static bfd_boolean
8560 elf32_arm_relocate_section (bfd * output_bfd,
8561 struct bfd_link_info * info,
8562 bfd * input_bfd,
8563 asection * input_section,
8564 bfd_byte * contents,
8565 Elf_Internal_Rela * relocs,
8566 Elf_Internal_Sym * local_syms,
8567 asection ** local_sections)
8568 {
8569 Elf_Internal_Shdr *symtab_hdr;
8570 struct elf_link_hash_entry **sym_hashes;
8571 Elf_Internal_Rela *rel;
8572 Elf_Internal_Rela *relend;
8573 const char *name;
8574 struct elf32_arm_link_hash_table * globals;
8575
8576 globals = elf32_arm_hash_table (info);
8577
8578 symtab_hdr = & elf_symtab_hdr (input_bfd);
8579 sym_hashes = elf_sym_hashes (input_bfd);
8580
8581 rel = relocs;
8582 relend = relocs + input_section->reloc_count;
8583 for (; rel < relend; rel++)
8584 {
8585 int r_type;
8586 reloc_howto_type * howto;
8587 unsigned long r_symndx;
8588 Elf_Internal_Sym * sym;
8589 asection * sec;
8590 struct elf_link_hash_entry * h;
8591 bfd_vma relocation;
8592 bfd_reloc_status_type r;
8593 arelent bfd_reloc;
8594 char sym_type;
8595 bfd_boolean unresolved_reloc = FALSE;
8596 char *error_message = NULL;
8597
8598 r_symndx = ELF32_R_SYM (rel->r_info);
8599 r_type = ELF32_R_TYPE (rel->r_info);
8600 r_type = arm_real_reloc_type (globals, r_type);
8601
8602 if ( r_type == R_ARM_GNU_VTENTRY
8603 || r_type == R_ARM_GNU_VTINHERIT)
8604 continue;
8605
8606 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8607 howto = bfd_reloc.howto;
8608
8609 h = NULL;
8610 sym = NULL;
8611 sec = NULL;
8612
8613 if (r_symndx < symtab_hdr->sh_info)
8614 {
8615 sym = local_syms + r_symndx;
8616 sym_type = ELF32_ST_TYPE (sym->st_info);
8617 sec = local_sections[r_symndx];
8618 if (globals->use_rel)
8619 {
8620 relocation = (sec->output_section->vma
8621 + sec->output_offset
8622 + sym->st_value);
8623 if (!info->relocatable
8624 && (sec->flags & SEC_MERGE)
8625 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8626 {
8627 asection *msec;
8628 bfd_vma addend, value;
8629
8630 switch (r_type)
8631 {
8632 case R_ARM_MOVW_ABS_NC:
8633 case R_ARM_MOVT_ABS:
8634 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8635 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
8636 addend = (addend ^ 0x8000) - 0x8000;
8637 break;
8638
8639 case R_ARM_THM_MOVW_ABS_NC:
8640 case R_ARM_THM_MOVT_ABS:
8641 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8642 << 16;
8643 value |= bfd_get_16 (input_bfd,
8644 contents + rel->r_offset + 2);
8645 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8646 | ((value & 0x04000000) >> 15);
8647 addend = (addend ^ 0x8000) - 0x8000;
8648 break;
8649
8650 default:
8651 if (howto->rightshift
8652 || (howto->src_mask & (howto->src_mask + 1)))
8653 {
8654 (*_bfd_error_handler)
8655 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8656 input_bfd, input_section,
8657 (long) rel->r_offset, howto->name);
8658 return FALSE;
8659 }
8660
8661 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8662
8663 /* Get the (signed) value from the instruction. */
8664 addend = value & howto->src_mask;
8665 if (addend & ((howto->src_mask + 1) >> 1))
8666 {
8667 bfd_signed_vma mask;
8668
8669 mask = -1;
8670 mask &= ~ howto->src_mask;
8671 addend |= mask;
8672 }
8673 break;
8674 }
8675
8676 msec = sec;
8677 addend =
8678 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8679 - relocation;
8680 addend += msec->output_section->vma + msec->output_offset;
8681
8682 /* Cases here must match those in the preceding
8683 switch statement. */
8684 switch (r_type)
8685 {
8686 case R_ARM_MOVW_ABS_NC:
8687 case R_ARM_MOVT_ABS:
8688 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8689 | (addend & 0xfff);
8690 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8691 break;
8692
8693 case R_ARM_THM_MOVW_ABS_NC:
8694 case R_ARM_THM_MOVT_ABS:
8695 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8696 | (addend & 0xff) | ((addend & 0x0800) << 15);
8697 bfd_put_16 (input_bfd, value >> 16,
8698 contents + rel->r_offset);
8699 bfd_put_16 (input_bfd, value,
8700 contents + rel->r_offset + 2);
8701 break;
8702
8703 default:
8704 value = (value & ~ howto->dst_mask)
8705 | (addend & howto->dst_mask);
8706 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8707 break;
8708 }
8709 }
8710 }
8711 else
8712 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8713 }
8714 else
8715 {
8716 bfd_boolean warned;
8717
8718 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8719 r_symndx, symtab_hdr, sym_hashes,
8720 h, sec, relocation,
8721 unresolved_reloc, warned);
8722
8723 sym_type = h->type;
8724 }
8725
8726 if (sec != NULL && elf_discarded_section (sec))
8727 {
8728 /* For relocs against symbols from removed linkonce sections,
8729 or sections discarded by a linker script, we just want the
8730 section contents zeroed. Avoid any special processing. */
8731 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8732 rel->r_info = 0;
8733 rel->r_addend = 0;
8734 continue;
8735 }
8736
8737 if (info->relocatable)
8738 {
8739 /* This is a relocatable link. We don't have to change
8740 anything, unless the reloc is against a section symbol,
8741 in which case we have to adjust according to where the
8742 section symbol winds up in the output section. */
8743 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8744 {
8745 if (globals->use_rel)
8746 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8747 howto, (bfd_signed_vma) sec->output_offset);
8748 else
8749 rel->r_addend += sec->output_offset;
8750 }
8751 continue;
8752 }
8753
8754 if (h != NULL)
8755 name = h->root.root.string;
8756 else
8757 {
8758 name = (bfd_elf_string_from_elf_section
8759 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8760 if (name == NULL || *name == '\0')
8761 name = bfd_section_name (input_bfd, sec);
8762 }
8763
8764 if (r_symndx != 0
8765 && r_type != R_ARM_NONE
8766 && (h == NULL
8767 || h->root.type == bfd_link_hash_defined
8768 || h->root.type == bfd_link_hash_defweak)
8769 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
8770 {
8771 (*_bfd_error_handler)
8772 ((sym_type == STT_TLS
8773 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
8774 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
8775 input_bfd,
8776 input_section,
8777 (long) rel->r_offset,
8778 howto->name,
8779 name);
8780 }
8781
8782 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
8783 input_section, contents, rel,
8784 relocation, info, sec, name,
8785 (h ? ELF_ST_TYPE (h->type) :
8786 ELF_ST_TYPE (sym->st_info)), h,
8787 &unresolved_reloc, &error_message);
8788
8789 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
8790 because such sections are not SEC_ALLOC and thus ld.so will
8791 not process them. */
8792 if (unresolved_reloc
8793 && !((input_section->flags & SEC_DEBUGGING) != 0
8794 && h->def_dynamic))
8795 {
8796 (*_bfd_error_handler)
8797 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
8798 input_bfd,
8799 input_section,
8800 (long) rel->r_offset,
8801 howto->name,
8802 h->root.root.string);
8803 return FALSE;
8804 }
8805
8806 if (r != bfd_reloc_ok)
8807 {
8808 switch (r)
8809 {
8810 case bfd_reloc_overflow:
8811 /* If the overflowing reloc was to an undefined symbol,
8812 we have already printed one error message and there
8813 is no point complaining again. */
8814 if ((! h ||
8815 h->root.type != bfd_link_hash_undefined)
8816 && (!((*info->callbacks->reloc_overflow)
8817 (info, (h ? &h->root : NULL), name, howto->name,
8818 (bfd_vma) 0, input_bfd, input_section,
8819 rel->r_offset))))
8820 return FALSE;
8821 break;
8822
8823 case bfd_reloc_undefined:
8824 if (!((*info->callbacks->undefined_symbol)
8825 (info, name, input_bfd, input_section,
8826 rel->r_offset, TRUE)))
8827 return FALSE;
8828 break;
8829
8830 case bfd_reloc_outofrange:
8831 error_message = _("out of range");
8832 goto common_error;
8833
8834 case bfd_reloc_notsupported:
8835 error_message = _("unsupported relocation");
8836 goto common_error;
8837
8838 case bfd_reloc_dangerous:
8839 /* error_message should already be set. */
8840 goto common_error;
8841
8842 default:
8843 error_message = _("unknown error");
8844 /* Fall through. */
8845
8846 common_error:
8847 BFD_ASSERT (error_message != NULL);
8848 if (!((*info->callbacks->reloc_dangerous)
8849 (info, error_message, input_bfd, input_section,
8850 rel->r_offset)))
8851 return FALSE;
8852 break;
8853 }
8854 }
8855 }
8856
8857 return TRUE;
8858 }
8859
8860 /* Add a new unwind edit to the list described by HEAD, TAIL. If INDEX is zero,
8861 the edit is added to the start of the list. (The list must be built in order of
8862 ascending INDEX: the function's callers are primarily responsible for
8863 maintaining that condition). */
8864
8865 static void
8866 add_unwind_table_edit (arm_unwind_table_edit **head,
8867 arm_unwind_table_edit **tail,
8868 arm_unwind_edit_type type,
8869 asection *linked_section,
8870 unsigned int index)
8871 {
8872 arm_unwind_table_edit *new_edit = xmalloc (sizeof (arm_unwind_table_edit));
8873
8874 new_edit->type = type;
8875 new_edit->linked_section = linked_section;
8876 new_edit->index = index;
8877
8878 if (index > 0)
8879 {
8880 new_edit->next = NULL;
8881
8882 if (*tail)
8883 (*tail)->next = new_edit;
8884
8885 (*tail) = new_edit;
8886
8887 if (!*head)
8888 (*head) = new_edit;
8889 }
8890 else
8891 {
8892 new_edit->next = *head;
8893
8894 if (!*tail)
8895 *tail = new_edit;
8896
8897 *head = new_edit;
8898 }
8899 }
8900
8901 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
8902
8903 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
8904 static void
8905 adjust_exidx_size(asection *exidx_sec, int adjust)
8906 {
8907 asection *out_sec;
8908
8909 if (!exidx_sec->rawsize)
8910 exidx_sec->rawsize = exidx_sec->size;
8911
8912 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
8913 out_sec = exidx_sec->output_section;
8914 /* Adjust size of output section. */
8915 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
8916 }
8917
8918 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
8919 static void
8920 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
8921 {
8922 struct _arm_elf_section_data *exidx_arm_data;
8923
8924 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
8925 add_unwind_table_edit (
8926 &exidx_arm_data->u.exidx.unwind_edit_list,
8927 &exidx_arm_data->u.exidx.unwind_edit_tail,
8928 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
8929
8930 adjust_exidx_size(exidx_sec, 8);
8931 }
8932
8933 /* Scan .ARM.exidx tables, and create a list describing edits which should be
8934 made to those tables, such that:
8935
8936 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
8937 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
8938 codes which have been inlined into the index).
8939
8940 The edits are applied when the tables are written
8941 (in elf32_arm_write_section).
8942 */
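/* Each .ARM.exidx entry is a pair of words: a prel31 reference to the
   start of the code it covers, and either the EXIDX_CANTUNWIND marker
   (1), an inlined unwind-opcode entry (bit 31 set), or a prel31
   reference into .ARM.extab; the scan below keys off the second word
   to classify entries.  */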
8943
8944 bfd_boolean
8945 elf32_arm_fix_exidx_coverage (asection **text_section_order,
8946 unsigned int num_text_sections,
8947 struct bfd_link_info *info)
8948 {
8949 bfd *inp;
8950 unsigned int last_second_word = 0, i;
8951 asection *last_exidx_sec = NULL;
8952 asection *last_text_sec = NULL;
8953 int last_unwind_type = -1;
8954
8955 /* Walk over all EXIDX sections, and create backlinks from the corresponding
8956 text sections. */
8957 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
8958 {
8959 asection *sec;
8960
8961 for (sec = inp->sections; sec != NULL; sec = sec->next)
8962 {
8963 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
8964 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
8965
8966 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
8967 continue;
8968
8969 if (elf_sec->linked_to)
8970 {
8971 Elf_Internal_Shdr *linked_hdr
8972 = &elf_section_data (elf_sec->linked_to)->this_hdr;
8973 struct _arm_elf_section_data *linked_sec_arm_data
8974 = get_arm_elf_section_data (linked_hdr->bfd_section);
8975
8976 if (linked_sec_arm_data == NULL)
8977 continue;
8978
8979 /* Link this .ARM.exidx section back from the text section it
8980 describes. */
8981 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
8982 }
8983 }
8984 }
8985
8986 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
8987 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
8988 and add EXIDX_CANTUNWIND entries for sections with no unwind table data.
8989 */
8990
8991 for (i = 0; i < num_text_sections; i++)
8992 {
8993 asection *sec = text_section_order[i];
8994 asection *exidx_sec;
8995 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
8996 struct _arm_elf_section_data *exidx_arm_data;
8997 bfd_byte *contents = NULL;
8998 int deleted_exidx_bytes = 0;
8999 bfd_vma j;
9000 arm_unwind_table_edit *unwind_edit_head = NULL;
9001 arm_unwind_table_edit *unwind_edit_tail = NULL;
9002 Elf_Internal_Shdr *hdr;
9003 bfd *ibfd;
9004
9005 if (arm_data == NULL)
9006 continue;
9007
9008 exidx_sec = arm_data->u.text.arm_exidx_sec;
9009 if (exidx_sec == NULL)
9010 {
9011 /* Section has no unwind data. */
9012 if (last_unwind_type == 0 || !last_exidx_sec)
9013 continue;
9014
9015 /* Ignore zero sized sections. */
9016 if (sec->size == 0)
9017 continue;
9018
9019 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9020 last_unwind_type = 0;
9021 continue;
9022 }
9023
9024 /* Skip /DISCARD/ sections. */
9025 if (bfd_is_abs_section (exidx_sec->output_section))
9026 continue;
9027
9028 hdr = &elf_section_data (exidx_sec)->this_hdr;
9029 if (hdr->sh_type != SHT_ARM_EXIDX)
9030 continue;
9031
9032 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9033 if (exidx_arm_data == NULL)
9034 continue;
9035
9036 ibfd = exidx_sec->owner;
9037
9038 if (hdr->contents != NULL)
9039 contents = hdr->contents;
9040 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9041 /* An error? */
9042 continue;
9043
9044 for (j = 0; j < hdr->sh_size; j += 8)
9045 {
9046 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9047 int unwind_type;
9048 int elide = 0;
9049
9050 /* An EXIDX_CANTUNWIND entry. */
9051 if (second_word == 1)
9052 {
9053 if (last_unwind_type == 0)
9054 elide = 1;
9055 unwind_type = 0;
9056 }
9057 /* Inlined unwinding data. Merge if equal to previous. */
9058 else if ((second_word & 0x80000000) != 0)
9059 {
9060 if (last_second_word == second_word && last_unwind_type == 1)
9061 elide = 1;
9062 unwind_type = 1;
9063 last_second_word = second_word;
9064 }
9065 /* Normal table entry. In theory we could merge these too,
9066 but duplicate entries are likely to be much less common. */
9067 else
9068 unwind_type = 2;
9069
9070 if (elide)
9071 {
9072 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9073 DELETE_EXIDX_ENTRY, NULL, j / 8);
9074
9075 deleted_exidx_bytes += 8;
9076 }
9077
9078 last_unwind_type = unwind_type;
9079 }
9080
9081 /* Free contents if we allocated it ourselves. */
9082 if (contents != hdr->contents)
9083 free (contents);
9084
9085 /* Record edits to be applied later (in elf32_arm_write_section). */
9086 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9087 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9088
9089 if (deleted_exidx_bytes > 0)
9090 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
9091
9092 last_exidx_sec = exidx_sec;
9093 last_text_sec = sec;
9094 }
9095
9096 /* Add terminating CANTUNWIND entry. */
9097 if (last_exidx_sec && last_unwind_type != 0)
9098 insert_cantunwind_after(last_text_sec, last_exidx_sec);
9099
9100 return TRUE;
9101 }
9102
9103 static bfd_boolean
9104 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9105 bfd *ibfd, const char *name)
9106 {
9107 asection *sec, *osec;
9108
9109 sec = bfd_get_section_by_name (ibfd, name);
9110 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9111 return TRUE;
9112
9113 osec = sec->output_section;
9114 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9115 return TRUE;
9116
9117 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9118 sec->output_offset, sec->size))
9119 return FALSE;
9120
9121 return TRUE;
9122 }
9123
9124 static bfd_boolean
9125 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9126 {
9127 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9128
9129 /* Invoke the regular ELF backend linker to do all the work. */
9130 if (!bfd_elf_final_link (abfd, info))
9131 return FALSE;
9132
9133 /* Write out any glue sections now that we have created all the
9134 stubs. */
9135 if (globals->bfd_of_glue_owner != NULL)
9136 {
9137 if (! elf32_arm_output_glue_section (info, abfd,
9138 globals->bfd_of_glue_owner,
9139 ARM2THUMB_GLUE_SECTION_NAME))
9140 return FALSE;
9141
9142 if (! elf32_arm_output_glue_section (info, abfd,
9143 globals->bfd_of_glue_owner,
9144 THUMB2ARM_GLUE_SECTION_NAME))
9145 return FALSE;
9146
9147 if (! elf32_arm_output_glue_section (info, abfd,
9148 globals->bfd_of_glue_owner,
9149 VFP11_ERRATUM_VENEER_SECTION_NAME))
9150 return FALSE;
9151
9152 if (! elf32_arm_output_glue_section (info, abfd,
9153 globals->bfd_of_glue_owner,
9154 ARM_BX_GLUE_SECTION_NAME))
9155 return FALSE;
9156 }
9157
9158 return TRUE;
9159 }
9160
9161 /* Set the right machine number. */
9162
9163 static bfd_boolean
9164 elf32_arm_object_p (bfd *abfd)
9165 {
9166 unsigned int mach;
9167
9168 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
9169
9170 if (mach != bfd_mach_arm_unknown)
9171 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9172
9173 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9174 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9175
9176 else
9177 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9178
9179 return TRUE;
9180 }
9181
9182 /* Function to keep ARM specific flags in the ELF header. */
9183
9184 static bfd_boolean
9185 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9186 {
9187 if (elf_flags_init (abfd)
9188 && elf_elfheader (abfd)->e_flags != flags)
9189 {
9190 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9191 {
9192 if (flags & EF_ARM_INTERWORK)
9193 (*_bfd_error_handler)
9194 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9195 abfd);
9196 else
9197 _bfd_error_handler
9198 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9199 abfd);
9200 }
9201 }
9202 else
9203 {
9204 elf_elfheader (abfd)->e_flags = flags;
9205 elf_flags_init (abfd) = TRUE;
9206 }
9207
9208 return TRUE;
9209 }
9210
9211 /* Copy backend specific data from one object module to another. */
9212
9213 static bfd_boolean
9214 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9215 {
9216 flagword in_flags;
9217 flagword out_flags;
9218
9219 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9220 return TRUE;
9221
9222 in_flags = elf_elfheader (ibfd)->e_flags;
9223 out_flags = elf_elfheader (obfd)->e_flags;
9224
9225 if (elf_flags_init (obfd)
9226 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9227 && in_flags != out_flags)
9228 {
9229 /* Cannot mix APCS26 and APCS32 code. */
9230 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9231 return FALSE;
9232
9233 /* Cannot mix float APCS and non-float APCS code. */
9234 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9235 return FALSE;
9236
9237 /* If the src and dest have different interworking flags
9238 then turn off the interworking bit. */
9239 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9240 {
9241 if (out_flags & EF_ARM_INTERWORK)
9242 _bfd_error_handler
9243 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9244 obfd, ibfd);
9245
9246 in_flags &= ~EF_ARM_INTERWORK;
9247 }
9248
9249 /* Likewise for PIC, though don't warn for this case. */
9250 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9251 in_flags &= ~EF_ARM_PIC;
9252 }
9253
9254 elf_elfheader (obfd)->e_flags = in_flags;
9255 elf_flags_init (obfd) = TRUE;
9256
9257 /* Also copy the EI_OSABI field. */
9258 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9259 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9260
9261 /* Copy object attributes. */
9262 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9263
9264 return TRUE;
9265 }
9266
9267 /* Values for Tag_ABI_PCS_R9_use. */
9268 enum
9269 {
9270 AEABI_R9_V6,
9271 AEABI_R9_SB,
9272 AEABI_R9_TLS,
9273 AEABI_R9_unused
9274 };
9275
9276 /* Values for Tag_ABI_PCS_RW_data. */
9277 enum
9278 {
9279 AEABI_PCS_RW_data_absolute,
9280 AEABI_PCS_RW_data_PCrel,
9281 AEABI_PCS_RW_data_SBrel,
9282 AEABI_PCS_RW_data_unused
9283 };
9284
9285 /* Values for Tag_ABI_enum_size. */
9286 enum
9287 {
9288 AEABI_enum_unused,
9289 AEABI_enum_short,
9290 AEABI_enum_wide,
9291 AEABI_enum_forced_wide
9292 };
9293
9294 /* Determine whether an object attribute tag takes an integer, a
9295 string or both. */
9296
9297 static int
9298 elf32_arm_obj_attrs_arg_type (int tag)
9299 {
9300 if (tag == Tag_compatibility)
9301 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9302 else if (tag == Tag_nodefaults)
9303 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9304 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9305 return ATTR_TYPE_FLAG_STR_VAL;
9306 else if (tag < 32)
9307 return ATTR_TYPE_FLAG_INT_VAL;
9308 else
9309 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9310 }
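/* For example, Tag_CPU_name (below 32) is explicitly string-valued above,
   while for tags numbered 32 and upwards the AEABI convention applies:
   odd-numbered tags take a string, even-numbered tags take an integer.  */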
9311
9312 /* The ABI defines that Tag_conformance should be emitted first, and that
9313 Tag_nodefaults should be second (if either is defined). This sets those
9314 two positions, and bumps up the position of all the remaining tags to
9315 compensate. */
9316 static int
9317 elf32_arm_obj_attrs_order (int num)
9318 {
9319 if (num == 4)
9320 return Tag_conformance;
9321 if (num == 5)
9322 return Tag_nodefaults;
9323 if ((num - 2) < Tag_nodefaults)
9324 return num - 2;
9325 if ((num - 1) < Tag_conformance)
9326 return num - 1;
9327 return num;
9328 }
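/* For example, output positions 4 and 5 yield Tag_conformance and
   Tag_nodefaults respectively; later positions yield the remaining tag
   numbers in ascending order with those two tags skipped.  */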
9329
9330 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9331 Returns -1 if no architecture could be read. */
9332
9333 static int
9334 get_secondary_compatible_arch (bfd *abfd)
9335 {
9336 obj_attribute *attr =
9337 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9338
9339 /* Note: the tag and its argument below are uleb128 values, though
9340 currently-defined values fit in one byte for each. */
9341 if (attr->s
9342 && attr->s[0] == Tag_CPU_arch
9343 && (attr->s[1] & 128) != 128
9344 && attr->s[2] == 0)
9345 return attr->s[1];
9346
9347 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9348 return -1;
9349 }
9350
9351 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9352 The tag is removed if ARCH is -1. */
9353
9354 static void
9355 set_secondary_compatible_arch (bfd *abfd, int arch)
9356 {
9357 obj_attribute *attr =
9358 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9359
9360 if (arch == -1)
9361 {
9362 attr->s = NULL;
9363 return;
9364 }
9365
9366 /* Note: the tag and its argument below are uleb128 values, though
9367 currently-defined values fit in one byte for each. */
9368 if (!attr->s)
9369 attr->s = bfd_alloc (abfd, 3);
9370 attr->s[0] = Tag_CPU_arch;
9371 attr->s[1] = arch;
9372 attr->s[2] = '\0';
9373 }
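/* For example, set_secondary_compatible_arch (abfd, TAG_CPU_ARCH_V6_M)
   stores the three bytes Tag_CPU_arch, TAG_CPU_ARCH_V6_M and a trailing
   NUL, which get_secondary_compatible_arch above decodes back to
   TAG_CPU_ARCH_V6_M.  */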
9374
9375 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
9376 into account. */
9377
9378 static int
9379 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9380 int newtag, int secondary_compat)
9381 {
9382 #define T(X) TAG_CPU_ARCH_##X
9383 int tagl, tagh, result;
9384 const int v6t2[] =
9385 {
9386 T(V6T2), /* PRE_V4. */
9387 T(V6T2), /* V4. */
9388 T(V6T2), /* V4T. */
9389 T(V6T2), /* V5T. */
9390 T(V6T2), /* V5TE. */
9391 T(V6T2), /* V5TEJ. */
9392 T(V6T2), /* V6. */
9393 T(V7), /* V6KZ. */
9394 T(V6T2) /* V6T2. */
9395 };
9396 const int v6k[] =
9397 {
9398 T(V6K), /* PRE_V4. */
9399 T(V6K), /* V4. */
9400 T(V6K), /* V4T. */
9401 T(V6K), /* V5T. */
9402 T(V6K), /* V5TE. */
9403 T(V6K), /* V5TEJ. */
9404 T(V6K), /* V6. */
9405 T(V6KZ), /* V6KZ. */
9406 T(V7), /* V6T2. */
9407 T(V6K) /* V6K. */
9408 };
9409 const int v7[] =
9410 {
9411 T(V7), /* PRE_V4. */
9412 T(V7), /* V4. */
9413 T(V7), /* V4T. */
9414 T(V7), /* V5T. */
9415 T(V7), /* V5TE. */
9416 T(V7), /* V5TEJ. */
9417 T(V7), /* V6. */
9418 T(V7), /* V6KZ. */
9419 T(V7), /* V6T2. */
9420 T(V7), /* V6K. */
9421 T(V7) /* V7. */
9422 };
9423 const int v6_m[] =
9424 {
9425 -1, /* PRE_V4. */
9426 -1, /* V4. */
9427 T(V6K), /* V4T. */
9428 T(V6K), /* V5T. */
9429 T(V6K), /* V5TE. */
9430 T(V6K), /* V5TEJ. */
9431 T(V6K), /* V6. */
9432 T(V6KZ), /* V6KZ. */
9433 T(V7), /* V6T2. */
9434 T(V6K), /* V6K. */
9435 T(V7), /* V7. */
9436 T(V6_M) /* V6_M. */
9437 };
9438 const int v6s_m[] =
9439 {
9440 -1, /* PRE_V4. */
9441 -1, /* V4. */
9442 T(V6K), /* V4T. */
9443 T(V6K), /* V5T. */
9444 T(V6K), /* V5TE. */
9445 T(V6K), /* V5TEJ. */
9446 T(V6K), /* V6. */
9447 T(V6KZ), /* V6KZ. */
9448 T(V7), /* V6T2. */
9449 T(V6K), /* V6K. */
9450 T(V7), /* V7. */
9451 T(V6S_M), /* V6_M. */
9452 T(V6S_M) /* V6S_M. */
9453 };
9454 const int v4t_plus_v6_m[] =
9455 {
9456 -1, /* PRE_V4. */
9457 -1, /* V4. */
9458 T(V4T), /* V4T. */
9459 T(V5T), /* V5T. */
9460 T(V5TE), /* V5TE. */
9461 T(V5TEJ), /* V5TEJ. */
9462 T(V6), /* V6. */
9463 T(V6KZ), /* V6KZ. */
9464 T(V6T2), /* V6T2. */
9465 T(V6K), /* V6K. */
9466 T(V7), /* V7. */
9467 T(V6_M), /* V6_M. */
9468 T(V6S_M), /* V6S_M. */
9469 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9470 };
9471 const int *comb[] =
9472 {
9473 v6t2,
9474 v6k,
9475 v7,
9476 v6_m,
9477 v6s_m,
9478 /* Pseudo-architecture. */
9479 v4t_plus_v6_m
9480 };
9481
9482 /* Check we've not got a higher architecture than we know about. */
9483
9484 if (oldtag >= MAX_TAG_CPU_ARCH || newtag >= MAX_TAG_CPU_ARCH)
9485 {
9486 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9487 return -1;
9488 }
9489
9490 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9491
9492 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9493 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9494 oldtag = T(V4T_PLUS_V6_M);
9495
9496 /* And override the new tag if we have a Tag_also_compatible_with on the
9497 input. */
9498
9499 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9500 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9501 newtag = T(V4T_PLUS_V6_M);
9502
9503 tagl = (oldtag < newtag) ? oldtag : newtag;
9504 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9505
9506 /* Architectures before V6KZ add features monotonically. */
9507 if (tagh <= TAG_CPU_ARCH_V6KZ)
9508 return result;
9509
9510 result = comb[tagh - T(V6T2)][tagl];
9511
9512 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9513 as the canonical version. */
9514 if (result == T(V4T_PLUS_V6_M))
9515 {
9516 result = T(V4T);
9517 *secondary_compat_out = T(V6_M);
9518 }
9519 else
9520 *secondary_compat_out = -1;
9521
9522 if (result == -1)
9523 {
9524 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9525 ibfd, oldtag, newtag);
9526 return -1;
9527 }
9528
9529 return result;
9530 #undef T
9531 }
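/* Worked example: combining an output tag of TAG_CPU_ARCH_V6K with an input
   tag of TAG_CPU_ARCH_V6T2 gives tagh == V6K and tagl == V6T2, so the lookup
   is v6k[V6T2], which is V7: the smallest architecture listed that provides
   both the V6K extensions and Thumb-2.  */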
9532
9533 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9534 are conflicting attributes. */
9535
9536 static bfd_boolean
9537 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9538 {
9539 obj_attribute *in_attr;
9540 obj_attribute *out_attr;
9541 obj_attribute_list *in_list;
9542 obj_attribute_list *out_list;
9543 obj_attribute_list **out_listp;
9544 /* Some tags have 0 = don't care, 1 = strong requirement,
9545 2 = weak requirement. */
9546 static const int order_021[3] = {0, 2, 1};
9547 /* For use with Tag_VFP_arch. */
9548 static const int order_01243[5] = {0, 1, 2, 4, 3};
9549 int i;
9550 bfd_boolean result = TRUE;
9551
9552 /* Skip the linker stubs file. This preserves previous behavior
9553 of accepting unknown attributes in the first input file - but
9554 is that a bug? */
9555 if (ibfd->flags & BFD_LINKER_CREATED)
9556 return TRUE;
9557
9558 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9559 {
9560 /* This is the first object. Copy the attributes. */
9561 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9562
9563 /* Use the Tag_null value to indicate the attributes have been
9564 initialized. */
9565 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9566
9567 return TRUE;
9568 }
9569
9570 in_attr = elf_known_obj_attributes_proc (ibfd);
9571 out_attr = elf_known_obj_attributes_proc (obfd);
9572 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9573 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9574 {
9575 /* Ignore mismatches if the object doesn't use floating point. */
9576 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9577 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9578 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9579 {
9580 _bfd_error_handler
9581 (_("error: %B uses VFP register arguments, %B does not"),
9582 ibfd, obfd);
9583 result = FALSE;
9584 }
9585 }
9586
9587 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9588 {
9589 /* Merge this attribute with existing attributes. */
9590 switch (i)
9591 {
9592 case Tag_CPU_raw_name:
9593 case Tag_CPU_name:
9594 /* These are merged after Tag_CPU_arch. */
9595 break;
9596
9597 case Tag_ABI_optimization_goals:
9598 case Tag_ABI_FP_optimization_goals:
9599 /* Use the first value seen. */
9600 break;
9601
9602 case Tag_CPU_arch:
9603 {
9604 int secondary_compat = -1, secondary_compat_out = -1;
9605 unsigned int saved_out_attr = out_attr[i].i;
9606 static const char *name_table[] = {
9607 /* These aren't real CPU names, but we can't guess
9608 that from the architecture version alone. */
9609 "Pre v4",
9610 "ARM v4",
9611 "ARM v4T",
9612 "ARM v5T",
9613 "ARM v5TE",
9614 "ARM v5TEJ",
9615 "ARM v6",
9616 "ARM v6KZ",
9617 "ARM v6T2",
9618 "ARM v6K",
9619 "ARM v7",
9620 "ARM v6-M",
9621 "ARM v6S-M"
9622 };
9623
9624 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9625 secondary_compat = get_secondary_compatible_arch (ibfd);
9626 secondary_compat_out = get_secondary_compatible_arch (obfd);
9627 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9628 &secondary_compat_out,
9629 in_attr[i].i,
9630 secondary_compat);
9631 set_secondary_compatible_arch (obfd, secondary_compat_out);
9632
9633 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9634 if (out_attr[i].i == saved_out_attr)
9635 ; /* Leave the names alone. */
9636 else if (out_attr[i].i == in_attr[i].i)
9637 {
9638 /* The output architecture has been changed to match the
9639 input architecture. Use the input names. */
9640 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9641 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9642 : NULL;
9643 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9644 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9645 : NULL;
9646 }
9647 else
9648 {
9649 out_attr[Tag_CPU_name].s = NULL;
9650 out_attr[Tag_CPU_raw_name].s = NULL;
9651 }
9652
9653 /* If we still don't have a value for Tag_CPU_name,
9654 make one up now. Tag_CPU_raw_name remains blank. */
9655 if (out_attr[Tag_CPU_name].s == NULL
9656 && out_attr[i].i < ARRAY_SIZE (name_table))
9657 out_attr[Tag_CPU_name].s =
9658 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9659 }
9660 break;
9661
9662 case Tag_ARM_ISA_use:
9663 case Tag_THUMB_ISA_use:
9664 case Tag_WMMX_arch:
9665 case Tag_Advanced_SIMD_arch:
9666 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9667 case Tag_ABI_FP_rounding:
9668 case Tag_ABI_FP_exceptions:
9669 case Tag_ABI_FP_user_exceptions:
9670 case Tag_ABI_FP_number_model:
9671 case Tag_VFP_HP_extension:
9672 case Tag_CPU_unaligned_access:
9673 case Tag_T2EE_use:
9674 case Tag_Virtualization_use:
9675 case Tag_MPextension_use:
9676 /* Use the largest value specified. */
9677 if (in_attr[i].i > out_attr[i].i)
9678 out_attr[i].i = in_attr[i].i;
9679 break;
9680
9681 case Tag_ABI_align8_preserved:
9682 case Tag_ABI_PCS_RO_data:
9683 /* Use the smallest value specified. */
9684 if (in_attr[i].i < out_attr[i].i)
9685 out_attr[i].i = in_attr[i].i;
9686 break;
9687
9688 case Tag_ABI_align8_needed:
9689 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9690 && (in_attr[Tag_ABI_align8_preserved].i == 0
9691 || out_attr[Tag_ABI_align8_preserved].i == 0))
9692 {
9693 /* This error message should be enabled once all non-conformant
9694 binaries in the toolchain have had the attributes set
9695 properly.
9696 _bfd_error_handler
9697 (_("error: %B: 8-byte data alignment conflicts with %B"),
9698 obfd, ibfd);
9699 result = FALSE; */
9700 }
9701 /* Fall through. */
9702 case Tag_ABI_FP_denormal:
9703 case Tag_ABI_PCS_GOT_use:
9704 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9705 value if greater than 2 (for future-proofing). */
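/* For example, merging 1 (strong requirement) with 2 (weak requirement)
   yields 1, because order_021 ranks 1 above 2.  */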
9706 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9707 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9708 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9709 out_attr[i].i = in_attr[i].i;
9710 break;
9711
9712
9713 case Tag_CPU_arch_profile:
9714 if (out_attr[i].i != in_attr[i].i)
9715 {
9716 /* 0 will merge with anything.
9717 'A' and 'S' merge to 'A'.
9718 'R' and 'S' merge to 'R'.
9719 'M' and 'A|R|S' is an error. */
9720 if (out_attr[i].i == 0
9721 || (out_attr[i].i == 'S'
9722 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9723 out_attr[i].i = in_attr[i].i;
9724 else if (in_attr[i].i == 0
9725 || (in_attr[i].i == 'S'
9726 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9727 ; /* Do nothing. */
9728 else
9729 {
9730 _bfd_error_handler
9731 (_("error: %B: Conflicting architecture profiles %c/%c"),
9732 ibfd,
9733 in_attr[i].i ? in_attr[i].i : '0',
9734 out_attr[i].i ? out_attr[i].i : '0');
9735 result = FALSE;
9736 }
9737 }
9738 break;
9739 case Tag_VFP_arch:
9740 /* Use the "greatest" from the sequence 0, 1, 2, 4, 3, or the
9741 largest value if greater than 4 (for future-proofing). */
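/* For example, 3 outranks 4 in order_01243, so merging 3 (VFPv3) with
   4 (VFPv3-D16, a subset of VFPv3) yields 3.  */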
9742 if ((in_attr[i].i > 4 && in_attr[i].i > out_attr[i].i)
9743 || (in_attr[i].i <= 4 && out_attr[i].i <= 4
9744 && order_01243[in_attr[i].i] > order_01243[out_attr[i].i]))
9745 out_attr[i].i = in_attr[i].i;
9746 break;
9747 case Tag_PCS_config:
9748 if (out_attr[i].i == 0)
9749 out_attr[i].i = in_attr[i].i;
9750 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
9751 {
9752 /* It's sometimes ok to mix different configs, so this is only
9753 a warning. */
9754 _bfd_error_handler
9755 (_("Warning: %B: Conflicting platform configuration"), ibfd);
9756 }
9757 break;
9758 case Tag_ABI_PCS_R9_use:
9759 if (in_attr[i].i != out_attr[i].i
9760 && out_attr[i].i != AEABI_R9_unused
9761 && in_attr[i].i != AEABI_R9_unused)
9762 {
9763 _bfd_error_handler
9764 (_("error: %B: Conflicting use of R9"), ibfd);
9765 result = FALSE;
9766 }
9767 if (out_attr[i].i == AEABI_R9_unused)
9768 out_attr[i].i = in_attr[i].i;
9769 break;
9770 case Tag_ABI_PCS_RW_data:
9771 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
9772 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
9773 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
9774 {
9775 _bfd_error_handler
9776 (_("error: %B: SB relative addressing conflicts with use of R9"),
9777 ibfd);
9778 result = FALSE;
9779 }
9780 /* Use the smallest value specified. */
9781 if (in_attr[i].i < out_attr[i].i)
9782 out_attr[i].i = in_attr[i].i;
9783 break;
9784 case Tag_ABI_PCS_wchar_t:
9785 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
9786 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
9787 {
9788 _bfd_error_handler
9789 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
9790 ibfd, in_attr[i].i, out_attr[i].i);
9791 }
9792 else if (in_attr[i].i && !out_attr[i].i)
9793 out_attr[i].i = in_attr[i].i;
9794 break;
9795 case Tag_ABI_enum_size:
9796 if (in_attr[i].i != AEABI_enum_unused)
9797 {
9798 if (out_attr[i].i == AEABI_enum_unused
9799 || out_attr[i].i == AEABI_enum_forced_wide)
9800 {
9801 /* The existing object is compatible with anything.
9802 Use whatever requirements the new object has. */
9803 out_attr[i].i = in_attr[i].i;
9804 }
9805 else if (in_attr[i].i != AEABI_enum_forced_wide
9806 && out_attr[i].i != in_attr[i].i
9807 && !elf_arm_tdata (obfd)->no_enum_size_warning)
9808 {
9809 static const char *aeabi_enum_names[] =
9810 { "", "variable-size", "32-bit", "" };
9811 const char *in_name =
9812 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9813 ? aeabi_enum_names[in_attr[i].i]
9814 : "<unknown>";
9815 const char *out_name =
9816 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
9817 ? aeabi_enum_names[out_attr[i].i]
9818 : "<unknown>";
9819 _bfd_error_handler
9820 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
9821 ibfd, in_name, out_name);
9822 }
9823 }
9824 break;
9825 case Tag_ABI_VFP_args:
9826 /* Already done. */
9827 break;
9828 case Tag_ABI_WMMX_args:
9829 if (in_attr[i].i != out_attr[i].i)
9830 {
9831 _bfd_error_handler
9832 (_("error: %B uses iWMMXt register arguments, %B does not"),
9833 ibfd, obfd);
9834 result = FALSE;
9835 }
9836 break;
9837 case Tag_compatibility:
9838 /* Merged in target-independent code. */
9839 break;
9840 case Tag_ABI_HardFP_use:
9841 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
9842 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
9843 || (in_attr[i].i == 2 && out_attr[i].i == 1))
9844 out_attr[i].i = 3;
9845 else if (in_attr[i].i > out_attr[i].i)
9846 out_attr[i].i = in_attr[i].i;
9847 break;
9848 case Tag_ABI_FP_16bit_format:
9849 if (in_attr[i].i != 0 && out_attr[i].i != 0)
9850 {
9851 if (in_attr[i].i != out_attr[i].i)
9852 {
9853 _bfd_error_handler
9854 (_("error: fp16 format mismatch between %B and %B"),
9855 ibfd, obfd);
9856 result = FALSE;
9857 }
9858 }
9859 if (in_attr[i].i != 0)
9860 out_attr[i].i = in_attr[i].i;
9861 break;
9862
9863 case Tag_nodefaults:
9864 /* This tag is set if it exists, but the value is unused (and is
9865 typically zero). We don't actually need to do anything here -
9866 the merge happens automatically when the type flags are merged
9867 below. */
9868 break;
9869 case Tag_also_compatible_with:
9870 /* Already done in Tag_CPU_arch. */
9871 break;
9872 case Tag_conformance:
9873 /* Keep the attribute if it matches. Throw it away otherwise.
9874 No attribute means no claim to conform. */
9875 if (!in_attr[i].s || !out_attr[i].s
9876 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
9877 out_attr[i].s = NULL;
9878 break;
9879
9880 default:
9881 {
9882 bfd *err_bfd = NULL;
9883
9884 /* The "known_obj_attributes" table does contain some undefined
9885 attributes. Ensure that they are unused. */
9886 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
9887 err_bfd = obfd;
9888 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
9889 err_bfd = ibfd;
9890
9891 if (err_bfd != NULL)
9892 {
9893 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9894 if ((i & 127) < 64)
9895 {
9896 _bfd_error_handler
9897 (_("%B: Unknown mandatory EABI object attribute %d"),
9898 err_bfd, i);
9899 bfd_set_error (bfd_error_bad_value);
9900 result = FALSE;
9901 }
9902 else
9903 {
9904 _bfd_error_handler
9905 (_("Warning: %B: Unknown EABI object attribute %d"),
9906 err_bfd, i);
9907 }
9908 }
9909
9910 /* Only pass on attributes that match in both inputs. */
9911 if (in_attr[i].i != out_attr[i].i
9912 || in_attr[i].s != out_attr[i].s
9913 || (in_attr[i].s != NULL && out_attr[i].s != NULL
9914 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
9915 {
9916 out_attr[i].i = 0;
9917 out_attr[i].s = NULL;
9918 }
9919 }
9920 }
9921
9922 /* If out_attr was copied from in_attr then it won't have a type yet. */
9923 if (in_attr[i].type && !out_attr[i].type)
9924 out_attr[i].type = in_attr[i].type;
9925 }
9926
9927 /* Merge Tag_compatibility attributes and any common GNU ones. */
9928 _bfd_elf_merge_object_attributes (ibfd, obfd);
9929
9930 /* Check for any attributes not known on ARM. */
9931 in_list = elf_other_obj_attributes_proc (ibfd);
9932 out_listp = &elf_other_obj_attributes_proc (obfd);
9933 out_list = *out_listp;
9934
9935 for (; in_list || out_list; )
9936 {
9937 bfd *err_bfd = NULL;
9938 int err_tag = 0;
9939
9940 /* The tags for each list are in numerical order. */
9941 /* If the tags are equal, then merge. */
9942 if (out_list && (!in_list || in_list->tag > out_list->tag))
9943 {
9944 /* This attribute only exists in obfd. We can't merge, and we don't
9945 know what the tag means, so delete it. */
9946 err_bfd = obfd;
9947 err_tag = out_list->tag;
9948 *out_listp = out_list->next;
9949 out_list = *out_listp;
9950 }
9951 else if (in_list && (!out_list || in_list->tag < out_list->tag))
9952 {
9953 /* This attribute only exists in ibfd. We can't merge, and we don't
9954 know what the tag means, so ignore it. */
9955 err_bfd = ibfd;
9956 err_tag = in_list->tag;
9957 in_list = in_list->next;
9958 }
9959 else /* The tags are equal. */
9960 {
9961 /* At present, all attributes in the list are unknown, and
9962 therefore can't be merged meaningfully. */
9963 err_bfd = obfd;
9964 err_tag = out_list->tag;
9965
9966 /* Only pass on attributes that match in both inputs. */
9967 if (in_list->attr.i != out_list->attr.i
9968 || in_list->attr.s != out_list->attr.s
9969 || (in_list->attr.s && out_list->attr.s
9970 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
9971 {
9972 /* No match. Delete the attribute. */
9973 *out_listp = out_list->next;
9974 out_list = *out_listp;
9975 }
9976 else
9977 {
9978 /* Matched. Keep the attribute and move to the next. */
9979 out_list = out_list->next;
9980 in_list = in_list->next;
9981 }
9982 }
9983
9984 if (err_bfd)
9985 {
9986 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
9987 if ((err_tag & 127) < 64)
9988 {
9989 _bfd_error_handler
9990 (_("%B: Unknown mandatory EABI object attribute %d"),
9991 err_bfd, err_tag);
9992 bfd_set_error (bfd_error_bad_value);
9993 result = FALSE;
9994 }
9995 else
9996 {
9997 _bfd_error_handler
9998 (_("Warning: %B: Unknown EABI object attribute %d"),
9999 err_bfd, err_tag);
10000 }
10001 }
10002 }
10003 return result;
10004 }
10005
10006
10007 /* Return TRUE if the two EABI versions are incompatible. */
10008
10009 static bfd_boolean
10010 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10011 {
10012 /* v4 and v5 are the same spec before and after it was released,
10013 so allow mixing them. */
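/* Hence a VER4 object may be combined with a VER5 object; any other
   pairing is compatible only when the two versions are identical.  */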
10014 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10015 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10016 return TRUE;
10017
10018 return (iver == over);
10019 }
10020
10021 /* Merge backend specific data from an object file to the output
10022 object file when linking. */
10023
10024 static bfd_boolean
10025 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
10026 {
10027 flagword out_flags;
10028 flagword in_flags;
10029 bfd_boolean flags_compatible = TRUE;
10030 asection *sec;
10031
10032 /* Check if we have the same endianness. */
10033 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
10034 return FALSE;
10035
10036 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
10037 return TRUE;
10038
10039 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
10040 return FALSE;
10041
10042 /* The input BFD must have had its flags initialised. */
10043 /* The following seems bogus to me -- The flags are initialized in
10044 the assembler but I don't think an elf_flags_init field is
10045 written into the object. */
10046 /* BFD_ASSERT (elf_flags_init (ibfd)); */
10047
10048 in_flags = elf_elfheader (ibfd)->e_flags;
10049 out_flags = elf_elfheader (obfd)->e_flags;
10050
10051 /* In theory there is no reason why we couldn't handle this. However,
10052 in practice it isn't even close to working and there is no real
10053 reason to want it. */
10054 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
10055 && !(ibfd->flags & DYNAMIC)
10056 && (in_flags & EF_ARM_BE8))
10057 {
10058 _bfd_error_handler (_("error: %B is already in final BE8 format"),
10059 ibfd);
10060 return FALSE;
10061 }
10062
10063 if (!elf_flags_init (obfd))
10064 {
10065 /* If the input is the default architecture and had the default
10066 flags then do not bother setting the flags for the output
10067 architecture, instead allow future merges to do this. If no
10068 future merges ever set these flags then they will retain their
10069 uninitialised values, which, surprise surprise, correspond
10070 to the default values. */
10071 if (bfd_get_arch_info (ibfd)->the_default
10072 && elf_elfheader (ibfd)->e_flags == 0)
10073 return TRUE;
10074
10075 elf_flags_init (obfd) = TRUE;
10076 elf_elfheader (obfd)->e_flags = in_flags;
10077
10078 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
10079 && bfd_get_arch_info (obfd)->the_default)
10080 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
10081
10082 return TRUE;
10083 }
10084
10085 /* Determine what should happen if the input ARM architecture
10086 does not match the output ARM architecture. */
10087 if (! bfd_arm_merge_machines (ibfd, obfd))
10088 return FALSE;
10089
10090 /* Identical flags must be compatible. */
10091 if (in_flags == out_flags)
10092 return TRUE;
10093
10094 /* Check to see if the input BFD actually contains any sections. If
10095 not, its flags may not have been initialised either, but it
10096 cannot actually cause any incompatibility. Do not short-circuit
10097 dynamic objects; their section list may be emptied by
10098 elf_link_add_object_symbols.
10099
10100 Also check to see if there are no code sections in the input.
10101 In this case there is no need to check for code specific flags.
10102 XXX - do we need to worry about floating-point format compatibility
10103 in data sections? */
10104 if (!(ibfd->flags & DYNAMIC))
10105 {
10106 bfd_boolean null_input_bfd = TRUE;
10107 bfd_boolean only_data_sections = TRUE;
10108
10109 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
10110 {
10111 /* Ignore synthetic glue sections. */
10112 if (strcmp (sec->name, ".glue_7")
10113 && strcmp (sec->name, ".glue_7t"))
10114 {
10115 if ((bfd_get_section_flags (ibfd, sec)
10116 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10117 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
10118 only_data_sections = FALSE;
10119
10120 null_input_bfd = FALSE;
10121 break;
10122 }
10123 }
10124
10125 if (null_input_bfd || only_data_sections)
10126 return TRUE;
10127 }
10128
10129 /* Complain about various flag mismatches. */
10130 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
10131 EF_ARM_EABI_VERSION (out_flags)))
10132 {
10133 _bfd_error_handler
10134 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
10135 ibfd, obfd,
10136 (in_flags & EF_ARM_EABIMASK) >> 24,
10137 (out_flags & EF_ARM_EABIMASK) >> 24);
10138 return FALSE;
10139 }
10140
10141 /* Not sure what needs to be checked for EABI versions >= 1. */
10142 /* VxWorks libraries do not use these flags. */
10143 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
10144 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
10145 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
10146 {
10147 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
10148 {
10149 _bfd_error_handler
10150 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
10151 ibfd, obfd,
10152 in_flags & EF_ARM_APCS_26 ? 26 : 32,
10153 out_flags & EF_ARM_APCS_26 ? 26 : 32);
10154 flags_compatible = FALSE;
10155 }
10156
10157 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
10158 {
10159 if (in_flags & EF_ARM_APCS_FLOAT)
10160 _bfd_error_handler
10161 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
10162 ibfd, obfd);
10163 else
10164 _bfd_error_handler
10165 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
10166 ibfd, obfd);
10167
10168 flags_compatible = FALSE;
10169 }
10170
10171 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
10172 {
10173 if (in_flags & EF_ARM_VFP_FLOAT)
10174 _bfd_error_handler
10175 (_("error: %B uses VFP instructions, whereas %B does not"),
10176 ibfd, obfd);
10177 else
10178 _bfd_error_handler
10179 (_("error: %B uses FPA instructions, whereas %B does not"),
10180 ibfd, obfd);
10181
10182 flags_compatible = FALSE;
10183 }
10184
10185 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
10186 {
10187 if (in_flags & EF_ARM_MAVERICK_FLOAT)
10188 _bfd_error_handler
10189 (_("error: %B uses Maverick instructions, whereas %B does not"),
10190 ibfd, obfd);
10191 else
10192 _bfd_error_handler
10193 (_("error: %B does not use Maverick instructions, whereas %B does"),
10194 ibfd, obfd);
10195
10196 flags_compatible = FALSE;
10197 }
10198
10199 #ifdef EF_ARM_SOFT_FLOAT
10200 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
10201 {
10202 /* We can allow interworking between code that is VFP format
10203 layout, and uses either soft float or integer regs for
10204 passing floating point arguments and results. We already
10205 know that the APCS_FLOAT flags match; similarly for VFP
10206 flags. */
10207 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
10208 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
10209 {
10210 if (in_flags & EF_ARM_SOFT_FLOAT)
10211 _bfd_error_handler
10212 (_("error: %B uses software FP, whereas %B uses hardware FP"),
10213 ibfd, obfd);
10214 else
10215 _bfd_error_handler
10216 (_("error: %B uses hardware FP, whereas %B uses software FP"),
10217 ibfd, obfd);
10218
10219 flags_compatible = FALSE;
10220 }
10221 }
10222 #endif
10223
10224 /* Interworking mismatch is only a warning. */
10225 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
10226 {
10227 if (in_flags & EF_ARM_INTERWORK)
10228 {
10229 _bfd_error_handler
10230 (_("Warning: %B supports interworking, whereas %B does not"),
10231 ibfd, obfd);
10232 }
10233 else
10234 {
10235 _bfd_error_handler
10236 (_("Warning: %B does not support interworking, whereas %B does"),
10237 ibfd, obfd);
10238 }
10239 }
10240 }
10241
10242 return flags_compatible;
10243 }
10244
10245 /* Display the flags field. */
10246
10247 static bfd_boolean
10248 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10249 {
10250 FILE * file = (FILE *) ptr;
10251 unsigned long flags;
10252
10253 BFD_ASSERT (abfd != NULL && ptr != NULL);
10254
10255 /* Print normal ELF private data. */
10256 _bfd_elf_print_private_bfd_data (abfd, ptr);
10257
10258 flags = elf_elfheader (abfd)->e_flags;
10259 /* Ignore init flag - it may not be set, despite the flags field
10260 containing valid data. */
10261
10262 /* xgettext:c-format */
10263 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10264
10265 switch (EF_ARM_EABI_VERSION (flags))
10266 {
10267 case EF_ARM_EABI_UNKNOWN:
10268 /* The following flag bits are GNU extensions and not part of the
10269 official ARM ELF extended ABI. Hence they are only decoded if
10270 the EABI version is not set. */
10271 if (flags & EF_ARM_INTERWORK)
10272 fprintf (file, _(" [interworking enabled]"));
10273
10274 if (flags & EF_ARM_APCS_26)
10275 fprintf (file, " [APCS-26]");
10276 else
10277 fprintf (file, " [APCS-32]");
10278
10279 if (flags & EF_ARM_VFP_FLOAT)
10280 fprintf (file, _(" [VFP float format]"));
10281 else if (flags & EF_ARM_MAVERICK_FLOAT)
10282 fprintf (file, _(" [Maverick float format]"));
10283 else
10284 fprintf (file, _(" [FPA float format]"));
10285
10286 if (flags & EF_ARM_APCS_FLOAT)
10287 fprintf (file, _(" [floats passed in float registers]"));
10288
10289 if (flags & EF_ARM_PIC)
10290 fprintf (file, _(" [position independent]"));
10291
10292 if (flags & EF_ARM_NEW_ABI)
10293 fprintf (file, _(" [new ABI]"));
10294
10295 if (flags & EF_ARM_OLD_ABI)
10296 fprintf (file, _(" [old ABI]"));
10297
10298 if (flags & EF_ARM_SOFT_FLOAT)
10299 fprintf (file, _(" [software FP]"));
10300
10301 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10302 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10303 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10304 | EF_ARM_MAVERICK_FLOAT);
10305 break;
10306
10307 case EF_ARM_EABI_VER1:
10308 fprintf (file, _(" [Version1 EABI]"));
10309
10310 if (flags & EF_ARM_SYMSARESORTED)
10311 fprintf (file, _(" [sorted symbol table]"));
10312 else
10313 fprintf (file, _(" [unsorted symbol table]"));
10314
10315 flags &= ~ EF_ARM_SYMSARESORTED;
10316 break;
10317
10318 case EF_ARM_EABI_VER2:
10319 fprintf (file, _(" [Version2 EABI]"));
10320
10321 if (flags & EF_ARM_SYMSARESORTED)
10322 fprintf (file, _(" [sorted symbol table]"));
10323 else
10324 fprintf (file, _(" [unsorted symbol table]"));
10325
10326 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10327 fprintf (file, _(" [dynamic symbols use segment index]"));
10328
10329 if (flags & EF_ARM_MAPSYMSFIRST)
10330 fprintf (file, _(" [mapping symbols precede others]"));
10331
10332 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10333 | EF_ARM_MAPSYMSFIRST);
10334 break;
10335
10336 case EF_ARM_EABI_VER3:
10337 fprintf (file, _(" [Version3 EABI]"));
10338 break;
10339
10340 case EF_ARM_EABI_VER4:
10341 fprintf (file, _(" [Version4 EABI]"));
10342 goto eabi;
10343
10344 case EF_ARM_EABI_VER5:
10345 fprintf (file, _(" [Version5 EABI]"));
10346 eabi:
10347 if (flags & EF_ARM_BE8)
10348 fprintf (file, _(" [BE8]"));
10349
10350 if (flags & EF_ARM_LE8)
10351 fprintf (file, _(" [LE8]"));
10352
10353 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10354 break;
10355
10356 default:
10357 fprintf (file, _(" <EABI version unrecognised>"));
10358 break;
10359 }
10360
10361 flags &= ~ EF_ARM_EABIMASK;
10362
10363 if (flags & EF_ARM_RELEXEC)
10364 fprintf (file, _(" [relocatable executable]"));
10365
10366 if (flags & EF_ARM_HASENTRY)
10367 fprintf (file, _(" [has entry point]"));
10368
10369 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10370
10371 if (flags)
10372 fprintf (file, _("<Unrecognised flag bits set>"));
10373
10374 fputc ('\n', file);
10375
10376 return TRUE;
10377 }
10378
10379 static int
10380 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10381 {
10382 switch (ELF_ST_TYPE (elf_sym->st_info))
10383 {
10384 case STT_ARM_TFUNC:
10385 return ELF_ST_TYPE (elf_sym->st_info);
10386
10387 case STT_ARM_16BIT:
10388 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10389 This allows us to distinguish between data used by Thumb instructions
10390 and non-data (which is probably code) inside Thumb regions of an
10391 executable. */
10392 if (type != STT_OBJECT && type != STT_TLS)
10393 return ELF_ST_TYPE (elf_sym->st_info);
10394 break;
10395
10396 default:
10397 break;
10398 }
10399
10400 return type;
10401 }
10402
10403 static asection *
10404 elf32_arm_gc_mark_hook (asection *sec,
10405 struct bfd_link_info *info,
10406 Elf_Internal_Rela *rel,
10407 struct elf_link_hash_entry *h,
10408 Elf_Internal_Sym *sym)
10409 {
10410 if (h != NULL)
10411 switch (ELF32_R_TYPE (rel->r_info))
10412 {
10413 case R_ARM_GNU_VTINHERIT:
10414 case R_ARM_GNU_VTENTRY:
10415 return NULL;
10416 }
10417
10418 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10419 }
10420
10421 /* Update the got entry reference counts for the section being removed. */
10422
10423 static bfd_boolean
10424 elf32_arm_gc_sweep_hook (bfd * abfd,
10425 struct bfd_link_info * info,
10426 asection * sec,
10427 const Elf_Internal_Rela * relocs)
10428 {
10429 Elf_Internal_Shdr *symtab_hdr;
10430 struct elf_link_hash_entry **sym_hashes;
10431 bfd_signed_vma *local_got_refcounts;
10432 const Elf_Internal_Rela *rel, *relend;
10433 struct elf32_arm_link_hash_table * globals;
10434
10435 if (info->relocatable)
10436 return TRUE;
10437
10438 globals = elf32_arm_hash_table (info);
10439
10440 elf_section_data (sec)->local_dynrel = NULL;
10441
10442 symtab_hdr = & elf_symtab_hdr (abfd);
10443 sym_hashes = elf_sym_hashes (abfd);
10444 local_got_refcounts = elf_local_got_refcounts (abfd);
10445
10446 check_use_blx (globals);
10447
10448 relend = relocs + sec->reloc_count;
10449 for (rel = relocs; rel < relend; rel++)
10450 {
10451 unsigned long r_symndx;
10452 struct elf_link_hash_entry *h = NULL;
10453 int r_type;
10454
10455 r_symndx = ELF32_R_SYM (rel->r_info);
10456 if (r_symndx >= symtab_hdr->sh_info)
10457 {
10458 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10459 while (h->root.type == bfd_link_hash_indirect
10460 || h->root.type == bfd_link_hash_warning)
10461 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10462 }
10463
10464 r_type = ELF32_R_TYPE (rel->r_info);
10465 r_type = arm_real_reloc_type (globals, r_type);
10466 switch (r_type)
10467 {
10468 case R_ARM_GOT32:
10469 case R_ARM_GOT_PREL:
10470 case R_ARM_TLS_GD32:
10471 case R_ARM_TLS_IE32:
10472 if (h != NULL)
10473 {
10474 if (h->got.refcount > 0)
10475 h->got.refcount -= 1;
10476 }
10477 else if (local_got_refcounts != NULL)
10478 {
10479 if (local_got_refcounts[r_symndx] > 0)
10480 local_got_refcounts[r_symndx] -= 1;
10481 }
10482 break;
10483
10484 case R_ARM_TLS_LDM32:
10485 elf32_arm_hash_table (info)->tls_ldm_got.refcount -= 1;
10486 break;
10487
10488 case R_ARM_ABS32:
10489 case R_ARM_ABS32_NOI:
10490 case R_ARM_REL32:
10491 case R_ARM_REL32_NOI:
10492 case R_ARM_PC24:
10493 case R_ARM_PLT32:
10494 case R_ARM_CALL:
10495 case R_ARM_JUMP24:
10496 case R_ARM_PREL31:
10497 case R_ARM_THM_CALL:
10498 case R_ARM_THM_JUMP24:
10499 case R_ARM_THM_JUMP19:
10500 case R_ARM_MOVW_ABS_NC:
10501 case R_ARM_MOVT_ABS:
10502 case R_ARM_MOVW_PREL_NC:
10503 case R_ARM_MOVT_PREL:
10504 case R_ARM_THM_MOVW_ABS_NC:
10505 case R_ARM_THM_MOVT_ABS:
10506 case R_ARM_THM_MOVW_PREL_NC:
10507 case R_ARM_THM_MOVT_PREL:
10508 /* Should the interworking branches be here also? */
10509
10510 if (h != NULL)
10511 {
10512 struct elf32_arm_link_hash_entry *eh;
10513 struct elf32_arm_relocs_copied **pp;
10514 struct elf32_arm_relocs_copied *p;
10515
10516 eh = (struct elf32_arm_link_hash_entry *) h;
10517
10518 if (h->plt.refcount > 0)
10519 {
10520 h->plt.refcount -= 1;
10521 if (r_type == R_ARM_THM_CALL)
10522 eh->plt_maybe_thumb_refcount--;
10523
10524 if (r_type == R_ARM_THM_JUMP24
10525 || r_type == R_ARM_THM_JUMP19)
10526 eh->plt_thumb_refcount--;
10527 }
10528
10529 if (r_type == R_ARM_ABS32
10530 || r_type == R_ARM_REL32
10531 || r_type == R_ARM_ABS32_NOI
10532 || r_type == R_ARM_REL32_NOI)
10533 {
10534 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10535 pp = &p->next)
10536 if (p->section == sec)
10537 {
10538 p->count -= 1;
10539 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10540 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10541 p->pc_count -= 1;
10542 if (p->count == 0)
10543 *pp = p->next;
10544 break;
10545 }
10546 }
10547 }
10548 break;
10549
10550 default:
10551 break;
10552 }
10553 }
10554
10555 return TRUE;
10556 }
10557
10558 /* Look through the relocs for a section during the first phase. */
10559
10560 static bfd_boolean
10561 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10562 asection *sec, const Elf_Internal_Rela *relocs)
10563 {
10564 Elf_Internal_Shdr *symtab_hdr;
10565 struct elf_link_hash_entry **sym_hashes;
10566 const Elf_Internal_Rela *rel;
10567 const Elf_Internal_Rela *rel_end;
10568 bfd *dynobj;
10569 asection *sreloc;
10570 bfd_vma *local_got_offsets;
10571 struct elf32_arm_link_hash_table *htab;
10572 bfd_boolean needs_plt;
10573 unsigned long nsyms;
10574
10575 if (info->relocatable)
10576 return TRUE;
10577
10578 BFD_ASSERT (is_arm_elf (abfd));
10579
10580 htab = elf32_arm_hash_table (info);
10581 sreloc = NULL;
10582
10583 /* Create dynamic sections for relocatable executables so that we can
10584 copy relocations. */
10585 if (htab->root.is_relocatable_executable
10586 && ! htab->root.dynamic_sections_created)
10587 {
10588 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10589 return FALSE;
10590 }
10591
10592 dynobj = elf_hash_table (info)->dynobj;
10593 local_got_offsets = elf_local_got_offsets (abfd);
10594
10595 symtab_hdr = & elf_symtab_hdr (abfd);
10596 sym_hashes = elf_sym_hashes (abfd);
10597 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10598
10599 rel_end = relocs + sec->reloc_count;
10600 for (rel = relocs; rel < rel_end; rel++)
10601 {
10602 struct elf_link_hash_entry *h;
10603 struct elf32_arm_link_hash_entry *eh;
10604 unsigned long r_symndx;
10605 int r_type;
10606
10607 r_symndx = ELF32_R_SYM (rel->r_info);
10608 r_type = ELF32_R_TYPE (rel->r_info);
10609 r_type = arm_real_reloc_type (htab, r_type);
10610
10611 if (r_symndx >= nsyms
10612 /* PR 9934: It is possible to have relocations that do not
10613 refer to symbols, thus it is also possible to have an
10614 object file containing relocations but no symbol table. */
10615 && (r_symndx > 0 || nsyms > 0))
10616 {
10617 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10618 r_symndx);
10619 return FALSE;
10620 }
10621
10622 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10623 h = NULL;
10624 else
10625 {
10626 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10627 while (h->root.type == bfd_link_hash_indirect
10628 || h->root.type == bfd_link_hash_warning)
10629 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10630 }
10631
10632 eh = (struct elf32_arm_link_hash_entry *) h;
10633
10634 switch (r_type)
10635 {
10636 case R_ARM_GOT32:
10637 case R_ARM_GOT_PREL:
10638 case R_ARM_TLS_GD32:
10639 case R_ARM_TLS_IE32:
10640 /* This symbol requires a global offset table entry. */
10641 {
10642 int tls_type, old_tls_type;
10643
10644 switch (r_type)
10645 {
10646 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10647 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10648 default: tls_type = GOT_NORMAL; break;
10649 }
10650
10651 if (h != NULL)
10652 {
10653 h->got.refcount++;
10654 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10655 }
10656 else
10657 {
10658 bfd_signed_vma *local_got_refcounts;
10659
10660 /* This is a global offset table entry for a local symbol. */
10661 local_got_refcounts = elf_local_got_refcounts (abfd);
10662 if (local_got_refcounts == NULL)
10663 {
10664 bfd_size_type size;
10665
10666 size = symtab_hdr->sh_info;
10667 size *= (sizeof (bfd_signed_vma) + sizeof (char));
10668 local_got_refcounts = bfd_zalloc (abfd, size);
10669 if (local_got_refcounts == NULL)
10670 return FALSE;
10671 elf_local_got_refcounts (abfd) = local_got_refcounts;
10672 elf32_arm_local_got_tls_type (abfd)
10673 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10674 }
10675 local_got_refcounts[r_symndx] += 1;
10676 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10677 }
10678
10679 /* We will already have issued an error message if there is a
10680 TLS / non-TLS mismatch, based on the symbol type. We don't
10681 support any linker relaxations. So just combine any TLS
10682 types needed. */
10683 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10684 && tls_type != GOT_NORMAL)
10685 tls_type |= old_tls_type;
10686
10687 if (old_tls_type != tls_type)
10688 {
10689 if (h != NULL)
10690 elf32_arm_hash_entry (h)->tls_type = tls_type;
10691 else
10692 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10693 }
10694 }
10695 /* Fall through. */
10696
10697 case R_ARM_TLS_LDM32:
10698 if (r_type == R_ARM_TLS_LDM32)
10699 htab->tls_ldm_got.refcount++;
10700 /* Fall through. */
10701
10702 case R_ARM_GOTOFF32:
10703 case R_ARM_GOTPC:
10704 if (htab->sgot == NULL)
10705 {
10706 if (htab->root.dynobj == NULL)
10707 htab->root.dynobj = abfd;
10708 if (!create_got_section (htab->root.dynobj, info))
10709 return FALSE;
10710 }
10711 break;
10712
10713 case R_ARM_ABS12:
10714 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10715 ldr __GOTT_INDEX__ offsets. */
10716 if (!htab->vxworks_p)
10717 break;
10718 /* Fall through. */
10719
10720 case R_ARM_PC24:
10721 case R_ARM_PLT32:
10722 case R_ARM_CALL:
10723 case R_ARM_JUMP24:
10724 case R_ARM_PREL31:
10725 case R_ARM_THM_CALL:
10726 case R_ARM_THM_JUMP24:
10727 case R_ARM_THM_JUMP19:
10728 needs_plt = 1;
10729 goto normal_reloc;
10730
10731 case R_ARM_MOVW_ABS_NC:
10732 case R_ARM_MOVT_ABS:
10733 case R_ARM_THM_MOVW_ABS_NC:
10734 case R_ARM_THM_MOVT_ABS:
10735 if (info->shared)
10736 {
10737 (*_bfd_error_handler)
10738 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10739 abfd, elf32_arm_howto_table_1[r_type].name,
10740 (h) ? h->root.root.string : "a local symbol");
10741 bfd_set_error (bfd_error_bad_value);
10742 return FALSE;
10743 }
10744
10745 /* Fall through. */
10746 case R_ARM_ABS32:
10747 case R_ARM_ABS32_NOI:
10748 case R_ARM_REL32:
10749 case R_ARM_REL32_NOI:
10750 case R_ARM_MOVW_PREL_NC:
10751 case R_ARM_MOVT_PREL:
10752 case R_ARM_THM_MOVW_PREL_NC:
10753 case R_ARM_THM_MOVT_PREL:
10754 needs_plt = 0;
10755 normal_reloc:
10756
10757 /* Should the interworking branches be listed here? */
10758 if (h != NULL)
10759 {
10760 /* If this reloc is in a read-only section, we might
10761 need a copy reloc. We can't check reliably at this
10762 stage whether the section is read-only, as input
10763 sections have not yet been mapped to output sections.
10764 Tentatively set the flag for now, and correct in
10765 adjust_dynamic_symbol. */
10766 if (!info->shared)
10767 h->non_got_ref = 1;
10768
10769 /* We may need a .plt entry if the function this reloc
10770 refers to is in a different object. We can't tell for
10771 sure yet, because something later might force the
10772 symbol local. */
10773 if (needs_plt)
10774 h->needs_plt = 1;
10775
10776 /* If we create a PLT entry, this relocation will reference
10777 it, even if it's an ABS32 relocation. */
10778 h->plt.refcount += 1;
10779
10780 /* It's too early to use htab->use_blx here, so we have to
10781 record possible blx references separately from
10782 relocs that definitely need a thumb stub. */
10783
10784 if (r_type == R_ARM_THM_CALL)
10785 eh->plt_maybe_thumb_refcount += 1;
10786
10787 if (r_type == R_ARM_THM_JUMP24
10788 || r_type == R_ARM_THM_JUMP19)
10789 eh->plt_thumb_refcount += 1;
10790 }
10791
10792 /* If we are creating a shared library or relocatable executable,
10793 and this is a reloc against a global symbol, or a non PC
10794 relative reloc against a local symbol, then we need to copy
10795 the reloc into the shared library. However, if we are linking
10796 with -Bsymbolic, we do not need to copy a reloc against a
10797 global symbol which is defined in an object we are
10798 including in the link (i.e., DEF_REGULAR is set). At
10799 this point we have not seen all the input files, so it is
10800 possible that DEF_REGULAR is not set now but will be set
10801 later (it is never cleared). We account for that
10802 possibility below by storing information in the
10803 relocs_copied field of the hash table entry. */
10804 if ((info->shared || htab->root.is_relocatable_executable)
10805 && (sec->flags & SEC_ALLOC) != 0
10806 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10807 || (h != NULL && ! h->needs_plt
10808 && (! info->symbolic || ! h->def_regular))))
10809 {
10810 struct elf32_arm_relocs_copied *p, **head;
10811
10812 /* When creating a shared object, we must copy these
10813 reloc types into the output file. We create a reloc
10814 section in dynobj and make room for this reloc. */
10815 if (sreloc == NULL)
10816 {
10817 sreloc = _bfd_elf_make_dynamic_reloc_section
10818 (sec, dynobj, 2, abfd, ! htab->use_rel);
10819
10820 if (sreloc == NULL)
10821 return FALSE;
10822
10823 /* BPABI objects never have dynamic relocations mapped. */
10824 if (htab->symbian_p)
10825 {
10826 flagword flags;
10827
10828 flags = bfd_get_section_flags (dynobj, sreloc);
10829 flags &= ~(SEC_LOAD | SEC_ALLOC);
10830 bfd_set_section_flags (dynobj, sreloc, flags);
10831 }
10832 }
10833
10834 /* If this is a global symbol, we count the number of
10835 relocations we need for this symbol. */
10836 if (h != NULL)
10837 {
10838 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10839 }
10840 else
10841 {
10842 /* Track dynamic relocs needed for local syms too.
10843 We really need local syms available to do this
10844 easily. Oh well. */
10845
10846 asection *s;
10847 void *vpp;
10848
10849 s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
10850 sec, r_symndx);
10851 if (s == NULL)
10852 return FALSE;
10853
10854 vpp = &elf_section_data (s)->local_dynrel;
10855 head = (struct elf32_arm_relocs_copied **) vpp;
10856 }
10857
10858 p = *head;
10859 if (p == NULL || p->section != sec)
10860 {
10861 bfd_size_type amt = sizeof *p;
10862
10863 p = bfd_alloc (htab->root.dynobj, amt);
10864 if (p == NULL)
10865 return FALSE;
10866 p->next = *head;
10867 *head = p;
10868 p->section = sec;
10869 p->count = 0;
10870 p->pc_count = 0;
10871 }
10872
10873 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10874 p->pc_count += 1;
10875 p->count += 1;
10876 }
10877 break;
10878
10879 /* This relocation describes the C++ object vtable hierarchy.
10880 Reconstruct it for later use during GC. */
10881 case R_ARM_GNU_VTINHERIT:
10882 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10883 return FALSE;
10884 break;
10885
10886 /* This relocation describes which C++ vtable entries are actually
10887 used. Record for later use during GC. */
10888 case R_ARM_GNU_VTENTRY:
10889 BFD_ASSERT (h != NULL);
10890 if (h != NULL
10891 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10892 return FALSE;
10893 break;
10894 }
10895 }
10896
10897 return TRUE;
10898 }
10899
10900 /* Unwinding tables are not referenced directly. This pass marks them as
10901 required if the corresponding code section is marked. */
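/* Each SHT_ARM_EXIDX section's sh_link identifies the text section it
   unwinds, so the loop below marks an unmarked exidx section as soon as
   the linked text section has been marked, iterating until a pass makes
   no further change.  */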
10902
10903 static bfd_boolean
10904 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10905 elf_gc_mark_hook_fn gc_mark_hook)
10906 {
10907 bfd *sub;
10908 Elf_Internal_Shdr **elf_shdrp;
10909 bfd_boolean again;
10910
10911 /* Marking EH data may cause additional code sections to be marked,
10912 requiring multiple passes. */
10913 again = TRUE;
10914 while (again)
10915 {
10916 again = FALSE;
10917 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
10918 {
10919 asection *o;
10920
10921 if (! is_arm_elf (sub))
10922 continue;
10923
10924 elf_shdrp = elf_elfsections (sub);
10925 for (o = sub->sections; o != NULL; o = o->next)
10926 {
10927 Elf_Internal_Shdr *hdr;
10928
10929 hdr = &elf_section_data (o)->this_hdr;
10930 if (hdr->sh_type == SHT_ARM_EXIDX
10931 && hdr->sh_link
10932 && hdr->sh_link < elf_numsections (sub)
10933 && !o->gc_mark
10934 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
10935 {
10936 again = TRUE;
10937 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
10938 return FALSE;
10939 }
10940 }
10941 }
10942 }
10943
10944 return TRUE;
10945 }
10946
10947 /* Treat mapping symbols as special target symbols. */
10948
10949 static bfd_boolean
10950 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
10951 {
10952 return bfd_is_arm_special_symbol_name (sym->name,
10953 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
10954 }
10955
10956 /* This is a copy of elf_find_function() from elf.c except that
10957 ARM mapping symbols are ignored when looking for function names
10958 and STT_ARM_TFUNC is considered to be a function type. */
10959
10960 static bfd_boolean
10961 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
10962 asection * section,
10963 asymbol ** symbols,
10964 bfd_vma offset,
10965 const char ** filename_ptr,
10966 const char ** functionname_ptr)
10967 {
10968 const char * filename = NULL;
10969 asymbol * func = NULL;
10970 bfd_vma low_func = 0;
10971 asymbol ** p;
10972
10973 for (p = symbols; *p != NULL; p++)
10974 {
10975 elf_symbol_type *q;
10976
10977 q = (elf_symbol_type *) *p;
10978
10979 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
10980 {
10981 default:
10982 break;
10983 case STT_FILE:
10984 filename = bfd_asymbol_name (&q->symbol);
10985 break;
10986 case STT_FUNC:
10987 case STT_ARM_TFUNC:
10988 case STT_NOTYPE:
10989 /* Skip mapping symbols. */
10990 if ((q->symbol.flags & BSF_LOCAL)
10991 && bfd_is_arm_special_symbol_name (q->symbol.name,
10992 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
10993 continue;
10994 /* Fall through. */
10995 if (bfd_get_section (&q->symbol) == section
10996 && q->symbol.value >= low_func
10997 && q->symbol.value <= offset)
10998 {
10999 func = (asymbol *) q;
11000 low_func = q->symbol.value;
11001 }
11002 break;
11003 }
11004 }
11005
11006 if (func == NULL)
11007 return FALSE;
11008
11009 if (filename_ptr)
11010 *filename_ptr = filename;
11011 if (functionname_ptr)
11012 *functionname_ptr = bfd_asymbol_name (func);
11013
11014 return TRUE;
11015 }
11016
11017
11018 /* Find the nearest line to a particular section and offset, for error
11019 reporting. This code is a duplicate of the code in elf.c, except
11020 that it uses arm_elf_find_function. */
11021
11022 static bfd_boolean
11023 elf32_arm_find_nearest_line (bfd * abfd,
11024 asection * section,
11025 asymbol ** symbols,
11026 bfd_vma offset,
11027 const char ** filename_ptr,
11028 const char ** functionname_ptr,
11029 unsigned int * line_ptr)
11030 {
11031 bfd_boolean found = FALSE;
11032
11033 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11034
11035 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11036 filename_ptr, functionname_ptr,
11037 line_ptr, 0,
11038 & elf_tdata (abfd)->dwarf2_find_line_info))
11039 {
11040 if (!*functionname_ptr)
11041 arm_elf_find_function (abfd, section, symbols, offset,
11042 *filename_ptr ? NULL : filename_ptr,
11043 functionname_ptr);
11044
11045 return TRUE;
11046 }
11047
11048 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11049 & found, filename_ptr,
11050 functionname_ptr, line_ptr,
11051 & elf_tdata (abfd)->line_info))
11052 return FALSE;
11053
11054 if (found && (*functionname_ptr || *line_ptr))
11055 return TRUE;
11056
11057 if (symbols == NULL)
11058 return FALSE;
11059
11060 if (! arm_elf_find_function (abfd, section, symbols, offset,
11061 filename_ptr, functionname_ptr))
11062 return FALSE;
11063
11064 *line_ptr = 0;
11065 return TRUE;
11066 }
11067
11068 static bfd_boolean
11069 elf32_arm_find_inliner_info (bfd * abfd,
11070 const char ** filename_ptr,
11071 const char ** functionname_ptr,
11072 unsigned int * line_ptr)
11073 {
11074 bfd_boolean found;
11075 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11076 functionname_ptr, line_ptr,
11077 & elf_tdata (abfd)->dwarf2_find_line_info);
11078 return found;
11079 }
11080
11081 /* Adjust a symbol defined by a dynamic object and referenced by a
11082 regular object. The current definition is in some section of the
11083 dynamic object, but we're not including those sections. We have to
11084 change the definition to something the rest of the link can
11085 understand. */
11086
11087 static bfd_boolean
11088 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11089 struct elf_link_hash_entry * h)
11090 {
11091 bfd * dynobj;
11092 asection * s;
11093 struct elf32_arm_link_hash_entry * eh;
11094 struct elf32_arm_link_hash_table *globals;
11095
11096 globals = elf32_arm_hash_table (info);
11097 dynobj = elf_hash_table (info)->dynobj;
11098
11099 /* Make sure we know what is going on here. */
11100 BFD_ASSERT (dynobj != NULL
11101 && (h->needs_plt
11102 || h->u.weakdef != NULL
11103 || (h->def_dynamic
11104 && h->ref_regular
11105 && !h->def_regular)));
11106
11107 eh = (struct elf32_arm_link_hash_entry *) h;
11108
11109 /* If this is a function, put it in the procedure linkage table. We
11110 will fill in the contents of the procedure linkage table later,
11111 when we know the address of the .got section. */
11112 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11113 || h->needs_plt)
11114 {
11115 if (h->plt.refcount <= 0
11116 || SYMBOL_CALLS_LOCAL (info, h)
11117 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11118 && h->root.type == bfd_link_hash_undefweak))
11119 {
11120 /* This case can occur if we saw a PLT32 reloc in an input
11121 file, but the symbol was never referred to by a dynamic
11122 object, or if all references were garbage collected. In
11123 such a case, we don't actually need to build a procedure
11124 linkage table, and we can just do a PC24 reloc instead. */
11125 h->plt.offset = (bfd_vma) -1;
11126 eh->plt_thumb_refcount = 0;
11127 eh->plt_maybe_thumb_refcount = 0;
11128 h->needs_plt = 0;
11129 }
11130
11131 return TRUE;
11132 }
11133 else
11134 {
11135 /* It's possible that we incorrectly decided a .plt reloc was
11136 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11137 in check_relocs. We can't decide accurately between function
11138 	 and non-function syms in check_relocs; objects loaded later in
11139 the link may change h->type. So fix it now. */
11140 h->plt.offset = (bfd_vma) -1;
11141 eh->plt_thumb_refcount = 0;
11142 eh->plt_maybe_thumb_refcount = 0;
11143 }
11144
11145 /* If this is a weak symbol, and there is a real definition, the
11146 processor independent code will have arranged for us to see the
11147 real definition first, and we can just use the same value. */
11148 if (h->u.weakdef != NULL)
11149 {
11150 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11151 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11152 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11153 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11154 return TRUE;
11155 }
11156
11157 /* If there are no non-GOT references, we do not need a copy
11158 relocation. */
11159 if (!h->non_got_ref)
11160 return TRUE;
11161
11162 /* This is a reference to a symbol defined by a dynamic object which
11163 is not a function. */
11164
11165 /* If we are creating a shared library, we must presume that the
11166 only references to the symbol are via the global offset table.
11167 For such cases we need not do anything here; the relocations will
11168 be handled correctly by relocate_section. Relocatable executables
11169 can reference data in shared objects directly, so we don't need to
11170 do anything here. */
11171 if (info->shared || globals->root.is_relocatable_executable)
11172 return TRUE;
11173
11174 if (h->size == 0)
11175 {
11176 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11177 h->root.root.string);
11178 return TRUE;
11179 }
11180
11181 /* We must allocate the symbol in our .dynbss section, which will
11182 become part of the .bss section of the executable. There will be
11183 an entry for this symbol in the .dynsym section. The dynamic
11184 object will contain position independent code, so all references
11185 from the dynamic object to this symbol will go through the global
11186 offset table. The dynamic linker will use the .dynsym entry to
11187 determine the address it must put in the global offset table, so
11188 both the dynamic object and the regular object will refer to the
11189 same memory location for the variable. */
11190 s = bfd_get_section_by_name (dynobj, ".dynbss");
11191 BFD_ASSERT (s != NULL);
11192
11193 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11194 copy the initial value out of the dynamic object and into the
11195 runtime process image. We need to remember the offset into the
11196 .rel(a).bss section we are going to use. */
11197 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11198 {
11199 asection *srel;
11200
11201 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11202 BFD_ASSERT (srel != NULL);
11203 srel->size += RELOC_SIZE (globals);
11204 h->needs_copy = 1;
11205 }
11206
11207 return _bfd_elf_adjust_dynamic_copy (h, s);
11208 }
11209
11210 /* Allocate space in .plt, .got and associated reloc sections for
11211 dynamic relocs. */
11212
11213 static bfd_boolean
11214 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11215 {
11216 struct bfd_link_info *info;
11217 struct elf32_arm_link_hash_table *htab;
11218 struct elf32_arm_link_hash_entry *eh;
11219 struct elf32_arm_relocs_copied *p;
11220 bfd_signed_vma thumb_refs;
11221
11222 eh = (struct elf32_arm_link_hash_entry *) h;
11223
11224 if (h->root.type == bfd_link_hash_indirect)
11225 return TRUE;
11226
11227 if (h->root.type == bfd_link_hash_warning)
11228 /* When warning symbols are created, they **replace** the "real"
11229 entry in the hash table, thus we never get to see the real
11230 symbol in a hash traversal. So look at it now. */
11231 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11232
11233 info = (struct bfd_link_info *) inf;
11234 htab = elf32_arm_hash_table (info);
11235
11236 if (htab->root.dynamic_sections_created
11237 && h->plt.refcount > 0)
11238 {
11239 /* Make sure this symbol is output as a dynamic symbol.
11240 Undefined weak syms won't yet be marked as dynamic. */
11241 if (h->dynindx == -1
11242 && !h->forced_local)
11243 {
11244 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11245 return FALSE;
11246 }
11247
11248 if (info->shared
11249 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11250 {
11251 asection *s = htab->splt;
11252
11253 /* If this is the first .plt entry, make room for the special
11254 first entry. */
11255 if (s->size == 0)
11256 s->size += htab->plt_header_size;
11257
11258 h->plt.offset = s->size;
11259
11260 /* If we will insert a Thumb trampoline before this PLT, leave room
11261 for it. */
11262 thumb_refs = eh->plt_thumb_refcount;
11263 if (!htab->use_blx)
11264 thumb_refs += eh->plt_maybe_thumb_refcount;
11265
11266 if (thumb_refs > 0)
11267 {
11268 h->plt.offset += PLT_THUMB_STUB_SIZE;
11269 s->size += PLT_THUMB_STUB_SIZE;
11270 }
11271
11272 /* If this symbol is not defined in a regular file, and we are
11273 not generating a shared library, then set the symbol to this
11274 location in the .plt. This is required to make function
11275 pointers compare as equal between the normal executable and
11276 the shared library. */
11277 if (! info->shared
11278 && !h->def_regular)
11279 {
11280 h->root.u.def.section = s;
11281 h->root.u.def.value = h->plt.offset;
11282
11283 /* Make sure the function is not marked as Thumb, in case
11284 it is the target of an ABS32 relocation, which will
11285 point to the PLT entry. */
11286 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11287 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11288 }
11289
11290 /* Make room for this entry. */
11291 s->size += htab->plt_entry_size;
11292
11293 if (!htab->symbian_p)
11294 {
11295 /* We also need to make an entry in the .got.plt section, which
11296 will be placed in the .got section by the linker script. */
11297 eh->plt_got_offset = htab->sgotplt->size;
11298 htab->sgotplt->size += 4;
11299 }
11300
11301 /* We also need to make an entry in the .rel(a).plt section. */
11302 htab->srelplt->size += RELOC_SIZE (htab);
11303
11304 /* VxWorks executables have a second set of relocations for
11305 each PLT entry. They go in a separate relocation section,
11306 which is processed by the kernel loader. */
11307 if (htab->vxworks_p && !info->shared)
11308 {
11309 /* There is a relocation for the initial PLT entry:
11310 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11311 if (h->plt.offset == htab->plt_header_size)
11312 htab->srelplt2->size += RELOC_SIZE (htab);
11313
11314 /* There are two extra relocations for each subsequent
11315 PLT entry: an R_ARM_32 relocation for the GOT entry,
11316 and an R_ARM_32 relocation for the PLT entry. */
11317 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11318 }
11319 }
11320 else
11321 {
11322 h->plt.offset = (bfd_vma) -1;
11323 h->needs_plt = 0;
11324 }
11325 }
11326 else
11327 {
11328 h->plt.offset = (bfd_vma) -1;
11329 h->needs_plt = 0;
11330 }
11331
11332 if (h->got.refcount > 0)
11333 {
11334 asection *s;
11335 bfd_boolean dyn;
11336 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11337 int indx;
11338
11339 /* Make sure this symbol is output as a dynamic symbol.
11340 Undefined weak syms won't yet be marked as dynamic. */
11341 if (h->dynindx == -1
11342 && !h->forced_local)
11343 {
11344 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11345 return FALSE;
11346 }
11347
11348 if (!htab->symbian_p)
11349 {
11350 s = htab->sgot;
11351 h->got.offset = s->size;
11352
11353 if (tls_type == GOT_UNKNOWN)
11354 abort ();
11355
11356 if (tls_type == GOT_NORMAL)
11357 /* Non-TLS symbols need one GOT slot. */
11358 s->size += 4;
11359 else
11360 {
11361 if (tls_type & GOT_TLS_GD)
11362 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11363 s->size += 8;
11364 if (tls_type & GOT_TLS_IE)
11365 /* R_ARM_TLS_IE32 needs one GOT slot. */
11366 s->size += 4;
11367 }
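	    /* (Background note, added for clarity and not from the original
	       sources: the GD pair conventionally holds the module index and
	       the module-relative offset, while IE needs only a single slot
	       for the TP-relative offset.)  */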
11368
11369 dyn = htab->root.dynamic_sections_created;
11370
11371 indx = 0;
11372 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11373 && (!info->shared
11374 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11375 indx = h->dynindx;
11376
11377 if (tls_type != GOT_NORMAL
11378 && (info->shared || indx != 0)
11379 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11380 || h->root.type != bfd_link_hash_undefweak))
11381 {
11382 if (tls_type & GOT_TLS_IE)
11383 htab->srelgot->size += RELOC_SIZE (htab);
11384
11385 if (tls_type & GOT_TLS_GD)
11386 htab->srelgot->size += RELOC_SIZE (htab);
11387
11388 if ((tls_type & GOT_TLS_GD) && indx != 0)
11389 htab->srelgot->size += RELOC_SIZE (htab);
11390 }
11391 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11392 || h->root.type != bfd_link_hash_undefweak)
11393 && (info->shared
11394 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11395 htab->srelgot->size += RELOC_SIZE (htab);
11396 }
11397 }
11398 else
11399 h->got.offset = (bfd_vma) -1;
11400
11401 /* Allocate stubs for exported Thumb functions on v4t. */
11402 if (!htab->use_blx && h->dynindx != -1
11403 && h->def_regular
11404 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11405 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11406 {
11407 struct elf_link_hash_entry * th;
11408 struct bfd_link_hash_entry * bh;
11409 struct elf_link_hash_entry * myh;
11410 char name[1024];
11411 asection *s;
11412 bh = NULL;
11413 	      /* Create a new symbol to register the real location of the function. */
11414 s = h->root.u.def.section;
11415 sprintf (name, "__real_%s", h->root.root.string);
11416 _bfd_generic_link_add_one_symbol (info, s->owner,
11417 name, BSF_GLOBAL, s,
11418 h->root.u.def.value,
11419 NULL, TRUE, FALSE, &bh);
11420
11421 myh = (struct elf_link_hash_entry *) bh;
11422 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11423 myh->forced_local = 1;
11424 eh->export_glue = myh;
11425 th = record_arm_to_thumb_glue (info, h);
11426 /* Point the symbol at the stub. */
11427 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11428 h->root.u.def.section = th->root.u.def.section;
11429 h->root.u.def.value = th->root.u.def.value & ~1;
11430 }
11431
11432 if (eh->relocs_copied == NULL)
11433 return TRUE;
11434
11435 /* In the shared -Bsymbolic case, discard space allocated for
11436 dynamic pc-relative relocs against symbols which turn out to be
11437 defined in regular objects. For the normal shared case, discard
11438 space for pc-relative relocs that have become local due to symbol
11439 visibility changes. */
11440
11441 if (info->shared || htab->root.is_relocatable_executable)
11442 {
11443 /* The only relocs that use pc_count are R_ARM_REL32 and
11444 R_ARM_REL32_NOI, which will appear on something like
11445 ".long foo - .". We want calls to protected symbols to resolve
11446 directly to the function rather than going via the plt. If people
11447 want function pointer comparisons to work as expected then they
11448 should avoid writing assembly like ".long foo - .". */
11449 if (SYMBOL_CALLS_LOCAL (info, h))
11450 {
11451 struct elf32_arm_relocs_copied **pp;
11452
11453 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11454 {
11455 p->count -= p->pc_count;
11456 p->pc_count = 0;
11457 if (p->count == 0)
11458 *pp = p->next;
11459 else
11460 pp = &p->next;
11461 }
11462 }
11463
11464 if (elf32_arm_hash_table (info)->vxworks_p)
11465 {
11466 struct elf32_arm_relocs_copied **pp;
11467
11468 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11469 {
11470 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11471 *pp = p->next;
11472 else
11473 pp = &p->next;
11474 }
11475 }
11476
11477 /* Also discard relocs on undefined weak syms with non-default
11478 visibility. */
11479 if (eh->relocs_copied != NULL
11480 && h->root.type == bfd_link_hash_undefweak)
11481 {
11482 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11483 eh->relocs_copied = NULL;
11484
11485 /* Make sure undefined weak symbols are output as a dynamic
11486 symbol in PIEs. */
11487 else if (h->dynindx == -1
11488 && !h->forced_local)
11489 {
11490 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11491 return FALSE;
11492 }
11493 }
11494
11495 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11496 && h->root.type == bfd_link_hash_new)
11497 {
11498 /* Output absolute symbols so that we can create relocations
11499 against them. For normal symbols we output a relocation
11500 against the section that contains them. */
11501 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11502 return FALSE;
11503 }
11504
11505 }
11506 else
11507 {
11508 /* For the non-shared case, discard space for relocs against
11509 symbols which turn out to need copy relocs or are not
11510 dynamic. */
11511
11512 if (!h->non_got_ref
11513 && ((h->def_dynamic
11514 && !h->def_regular)
11515 || (htab->root.dynamic_sections_created
11516 && (h->root.type == bfd_link_hash_undefweak
11517 || h->root.type == bfd_link_hash_undefined))))
11518 {
11519 /* Make sure this symbol is output as a dynamic symbol.
11520 Undefined weak syms won't yet be marked as dynamic. */
11521 if (h->dynindx == -1
11522 && !h->forced_local)
11523 {
11524 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11525 return FALSE;
11526 }
11527
11528 /* If that succeeded, we know we'll be keeping all the
11529 relocs. */
11530 if (h->dynindx != -1)
11531 goto keep;
11532 }
11533
11534 eh->relocs_copied = NULL;
11535
11536 keep: ;
11537 }
11538
11539 /* Finally, allocate space. */
11540 for (p = eh->relocs_copied; p != NULL; p = p->next)
11541 {
11542 asection *sreloc = elf_section_data (p->section)->sreloc;
11543 sreloc->size += p->count * RELOC_SIZE (htab);
11544 }
11545
11546 return TRUE;
11547 }
11548
11549 /* Find any dynamic relocs that apply to read-only sections. */
11550
11551 static bfd_boolean
11552 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11553 {
11554 struct elf32_arm_link_hash_entry * eh;
11555 struct elf32_arm_relocs_copied * p;
11556
11557 if (h->root.type == bfd_link_hash_warning)
11558 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11559
11560 eh = (struct elf32_arm_link_hash_entry *) h;
11561 for (p = eh->relocs_copied; p != NULL; p = p->next)
11562 {
11563 asection *s = p->section;
11564
11565 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11566 {
11567 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11568
11569 info->flags |= DF_TEXTREL;
11570
11571 /* Not an error, just cut short the traversal. */
11572 return FALSE;
11573 }
11574 }
11575 return TRUE;
11576 }
11577
11578 void
11579 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11580 int byteswap_code)
11581 {
11582 struct elf32_arm_link_hash_table *globals;
11583
11584 globals = elf32_arm_hash_table (info);
11585 globals->byteswap_code = byteswap_code;
11586 }
11587
11588 /* Set the sizes of the dynamic sections. */
11589
11590 static bfd_boolean
11591 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11592 struct bfd_link_info * info)
11593 {
11594 bfd * dynobj;
11595 asection * s;
11596 bfd_boolean plt;
11597 bfd_boolean relocs;
11598 bfd *ibfd;
11599 struct elf32_arm_link_hash_table *htab;
11600
11601 htab = elf32_arm_hash_table (info);
11602 dynobj = elf_hash_table (info)->dynobj;
11603 BFD_ASSERT (dynobj != NULL);
11604 check_use_blx (htab);
11605
11606 if (elf_hash_table (info)->dynamic_sections_created)
11607 {
11608 /* Set the contents of the .interp section to the interpreter. */
11609 if (info->executable)
11610 {
11611 s = bfd_get_section_by_name (dynobj, ".interp");
11612 BFD_ASSERT (s != NULL);
11613 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11614 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11615 }
11616 }
11617
11618 /* Set up .got offsets for local syms, and space for local dynamic
11619 relocs. */
11620 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11621 {
11622 bfd_signed_vma *local_got;
11623 bfd_signed_vma *end_local_got;
11624 char *local_tls_type;
11625 bfd_size_type locsymcount;
11626 Elf_Internal_Shdr *symtab_hdr;
11627 asection *srel;
11628 bfd_boolean is_vxworks = elf32_arm_hash_table (info)->vxworks_p;
11629
11630 if (! is_arm_elf (ibfd))
11631 continue;
11632
11633 for (s = ibfd->sections; s != NULL; s = s->next)
11634 {
11635 struct elf32_arm_relocs_copied *p;
11636
11637 for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11638 {
11639 if (!bfd_is_abs_section (p->section)
11640 && bfd_is_abs_section (p->section->output_section))
11641 {
11642 /* Input section has been discarded, either because
11643 it is a copy of a linkonce section or due to
11644 linker script /DISCARD/, so we'll be discarding
11645 the relocs too. */
11646 }
11647 else if (is_vxworks
11648 && strcmp (p->section->output_section->name,
11649 ".tls_vars") == 0)
11650 {
11651 /* Relocations in vxworks .tls_vars sections are
11652 handled specially by the loader. */
11653 }
11654 else if (p->count != 0)
11655 {
11656 srel = elf_section_data (p->section)->sreloc;
11657 srel->size += p->count * RELOC_SIZE (htab);
11658 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11659 info->flags |= DF_TEXTREL;
11660 }
11661 }
11662 }
11663
11664 local_got = elf_local_got_refcounts (ibfd);
11665 if (!local_got)
11666 continue;
11667
11668 symtab_hdr = & elf_symtab_hdr (ibfd);
11669 locsymcount = symtab_hdr->sh_info;
11670 end_local_got = local_got + locsymcount;
11671 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11672 s = htab->sgot;
11673 srel = htab->srelgot;
11674 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11675 {
11676 if (*local_got > 0)
11677 {
11678 *local_got = s->size;
11679 if (*local_tls_type & GOT_TLS_GD)
11680 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11681 s->size += 8;
11682 if (*local_tls_type & GOT_TLS_IE)
11683 s->size += 4;
11684 if (*local_tls_type == GOT_NORMAL)
11685 s->size += 4;
11686
11687 if (info->shared || *local_tls_type == GOT_TLS_GD)
11688 srel->size += RELOC_SIZE (htab);
11689 }
11690 else
11691 *local_got = (bfd_vma) -1;
11692 }
11693 }
11694
11695 if (htab->tls_ldm_got.refcount > 0)
11696 {
11697 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11698 for R_ARM_TLS_LDM32 relocations. */
11699 htab->tls_ldm_got.offset = htab->sgot->size;
11700 htab->sgot->size += 8;
11701 if (info->shared)
11702 htab->srelgot->size += RELOC_SIZE (htab);
11703 }
11704 else
11705 htab->tls_ldm_got.offset = -1;
11706
11707 /* Allocate global sym .plt and .got entries, and space for global
11708 sym dynamic relocs. */
11709 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11710
11711 /* Here we rummage through the found bfds to collect glue information. */
11712 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11713 {
11714 if (! is_arm_elf (ibfd))
11715 continue;
11716
11717 /* Initialise mapping tables for code/data. */
11718 bfd_elf32_arm_init_maps (ibfd);
11719
11720 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11721 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11722 /* xgettext:c-format */
11723 _bfd_error_handler (_("Errors encountered processing file %s"),
11724 ibfd->filename);
11725 }
11726
11727 /* Allocate space for the glue sections now that we've sized them. */
11728 bfd_elf32_arm_allocate_interworking_sections (info);
11729
11730 /* The check_relocs and adjust_dynamic_symbol entry points have
11731 determined the sizes of the various dynamic sections. Allocate
11732 memory for them. */
11733 plt = FALSE;
11734 relocs = FALSE;
11735 for (s = dynobj->sections; s != NULL; s = s->next)
11736 {
11737 const char * name;
11738
11739 if ((s->flags & SEC_LINKER_CREATED) == 0)
11740 continue;
11741
11742 /* It's OK to base decisions on the section name, because none
11743 of the dynobj section names depend upon the input files. */
11744 name = bfd_get_section_name (dynobj, s);
11745
11746 if (strcmp (name, ".plt") == 0)
11747 {
11748 /* Remember whether there is a PLT. */
11749 plt = s->size != 0;
11750 }
11751 else if (CONST_STRNEQ (name, ".rel"))
11752 {
11753 if (s->size != 0)
11754 {
11755 /* Remember whether there are any reloc sections other
11756 than .rel(a).plt and .rela.plt.unloaded. */
11757 if (s != htab->srelplt && s != htab->srelplt2)
11758 relocs = TRUE;
11759
11760 /* We use the reloc_count field as a counter if we need
11761 to copy relocs into the output file. */
11762 s->reloc_count = 0;
11763 }
11764 }
11765 else if (! CONST_STRNEQ (name, ".got")
11766 && strcmp (name, ".dynbss") != 0)
11767 {
11768 /* It's not one of our sections, so don't allocate space. */
11769 continue;
11770 }
11771
11772 if (s->size == 0)
11773 {
11774 /* If we don't need this section, strip it from the
11775 output file. This is mostly to handle .rel(a).bss and
11776 .rel(a).plt. We must create both sections in
11777 create_dynamic_sections, because they must be created
11778 before the linker maps input sections to output
11779 sections. The linker does that before
11780 adjust_dynamic_symbol is called, and it is that
11781 function which decides whether anything needs to go
11782 into these sections. */
11783 s->flags |= SEC_EXCLUDE;
11784 continue;
11785 }
11786
11787 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11788 continue;
11789
11790 /* Allocate memory for the section contents. */
11791 s->contents = bfd_zalloc (dynobj, s->size);
11792 if (s->contents == NULL)
11793 return FALSE;
11794 }
11795
11796 if (elf_hash_table (info)->dynamic_sections_created)
11797 {
11798 /* Add some entries to the .dynamic section. We fill in the
11799 values later, in elf32_arm_finish_dynamic_sections, but we
11800 must add the entries now so that we get the correct size for
11801 the .dynamic section. The DT_DEBUG entry is filled in by the
11802 dynamic linker and used by the debugger. */
11803 #define add_dynamic_entry(TAG, VAL) \
11804 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11805
11806 if (info->executable)
11807 {
11808 if (!add_dynamic_entry (DT_DEBUG, 0))
11809 return FALSE;
11810 }
11811
11812 if (plt)
11813 {
11814 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11815 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11816 || !add_dynamic_entry (DT_PLTREL,
11817 htab->use_rel ? DT_REL : DT_RELA)
11818 || !add_dynamic_entry (DT_JMPREL, 0))
11819 return FALSE;
11820 }
11821
11822 if (relocs)
11823 {
11824 if (htab->use_rel)
11825 {
11826 if (!add_dynamic_entry (DT_REL, 0)
11827 || !add_dynamic_entry (DT_RELSZ, 0)
11828 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11829 return FALSE;
11830 }
11831 else
11832 {
11833 if (!add_dynamic_entry (DT_RELA, 0)
11834 || !add_dynamic_entry (DT_RELASZ, 0)
11835 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11836 return FALSE;
11837 }
11838 }
11839
11840 /* If any dynamic relocs apply to a read-only section,
11841 then we need a DT_TEXTREL entry. */
11842 if ((info->flags & DF_TEXTREL) == 0)
11843 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11844 info);
11845
11846 if ((info->flags & DF_TEXTREL) != 0)
11847 {
11848 if (!add_dynamic_entry (DT_TEXTREL, 0))
11849 return FALSE;
11850 }
11851 if (htab->vxworks_p
11852 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11853 return FALSE;
11854 }
11855 #undef add_dynamic_entry
11856
11857 return TRUE;
11858 }
11859
11860 /* Finish up dynamic symbol handling. We set the contents of various
11861 dynamic sections here. */
11862
11863 static bfd_boolean
11864 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11865 struct bfd_link_info * info,
11866 struct elf_link_hash_entry * h,
11867 Elf_Internal_Sym * sym)
11868 {
11869 bfd * dynobj;
11870 struct elf32_arm_link_hash_table *htab;
11871 struct elf32_arm_link_hash_entry *eh;
11872
11873 dynobj = elf_hash_table (info)->dynobj;
11874 htab = elf32_arm_hash_table (info);
11875 eh = (struct elf32_arm_link_hash_entry *) h;
11876
11877 if (h->plt.offset != (bfd_vma) -1)
11878 {
11879 asection * splt;
11880 asection * srel;
11881 bfd_byte *loc;
11882 bfd_vma plt_index;
11883 Elf_Internal_Rela rel;
11884
11885 /* This symbol has an entry in the procedure linkage table. Set
11886 it up. */
11887
11888 BFD_ASSERT (h->dynindx != -1);
11889
11890 splt = bfd_get_section_by_name (dynobj, ".plt");
11891 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11892 BFD_ASSERT (splt != NULL && srel != NULL);
11893
11894 /* Fill in the entry in the procedure linkage table. */
11895 if (htab->symbian_p)
11896 {
11897 put_arm_insn (htab, output_bfd,
11898 elf32_arm_symbian_plt_entry[0],
11899 splt->contents + h->plt.offset);
11900 bfd_put_32 (output_bfd,
11901 elf32_arm_symbian_plt_entry[1],
11902 splt->contents + h->plt.offset + 4);
11903
11904 /* Fill in the entry in the .rel.plt section. */
11905 rel.r_offset = (splt->output_section->vma
11906 + splt->output_offset
11907 + h->plt.offset + 4);
11908 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11909
11910 /* Get the index in the procedure linkage table which
11911 corresponds to this symbol. This is the index of this symbol
11912 in all the symbols for which we are making plt entries. The
11913 first entry in the procedure linkage table is reserved. */
11914 plt_index = ((h->plt.offset - htab->plt_header_size)
11915 / htab->plt_entry_size);
11916 }
11917 else
11918 {
11919 bfd_vma got_offset, got_address, plt_address;
11920 bfd_vma got_displacement;
11921 asection * sgot;
11922 bfd_byte * ptr;
11923
11924 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
11925 BFD_ASSERT (sgot != NULL);
11926
11927 /* Get the offset into the .got.plt table of the entry that
11928 corresponds to this function. */
11929 got_offset = eh->plt_got_offset;
11930
11931 /* Get the index in the procedure linkage table which
11932 corresponds to this symbol. This is the index of this symbol
11933 in all the symbols for which we are making plt entries. The
11934 first three entries in .got.plt are reserved; after that
11935 symbols appear in the same order as in .plt. */
11936 plt_index = (got_offset - 12) / 4;
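	  /* (Worked example, added for clarity: with the three reserved
	     words at offsets 0, 4 and 8, a got_offset of 12 yields
	     plt_index 0, 16 yields 1, and so on.)  */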
11937
11938 /* Calculate the address of the GOT entry. */
11939 got_address = (sgot->output_section->vma
11940 + sgot->output_offset
11941 + got_offset);
11942
11943 /* ...and the address of the PLT entry. */
11944 plt_address = (splt->output_section->vma
11945 + splt->output_offset
11946 + h->plt.offset);
11947
11948 ptr = htab->splt->contents + h->plt.offset;
11949 if (htab->vxworks_p && info->shared)
11950 {
11951 unsigned int i;
11952 bfd_vma val;
11953
11954 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11955 {
11956 val = elf32_arm_vxworks_shared_plt_entry[i];
11957 if (i == 2)
11958 val |= got_address - sgot->output_section->vma;
11959 if (i == 5)
11960 val |= plt_index * RELOC_SIZE (htab);
11961 if (i == 2 || i == 5)
11962 bfd_put_32 (output_bfd, val, ptr);
11963 else
11964 put_arm_insn (htab, output_bfd, val, ptr);
11965 }
11966 }
11967 else if (htab->vxworks_p)
11968 {
11969 unsigned int i;
11970 bfd_vma val;
11971
11972 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
11973 {
11974 val = elf32_arm_vxworks_exec_plt_entry[i];
11975 if (i == 2)
11976 val |= got_address;
11977 if (i == 4)
11978 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
11979 if (i == 5)
11980 val |= plt_index * RELOC_SIZE (htab);
11981 if (i == 2 || i == 5)
11982 bfd_put_32 (output_bfd, val, ptr);
11983 else
11984 put_arm_insn (htab, output_bfd, val, ptr);
11985 }
11986
11987 loc = (htab->srelplt2->contents
11988 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
11989
11990 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
11991 referencing the GOT for this PLT entry. */
11992 rel.r_offset = plt_address + 8;
11993 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
11994 rel.r_addend = got_offset;
11995 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
11996 loc += RELOC_SIZE (htab);
11997
11998 /* Create the R_ARM_ABS32 relocation referencing the
11999 beginning of the PLT for this GOT entry. */
12000 rel.r_offset = got_address;
12001 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12002 rel.r_addend = 0;
12003 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12004 }
12005 else
12006 {
12007 bfd_signed_vma thumb_refs;
12008 /* Calculate the displacement between the PLT slot and the
12009 entry in the GOT. The eight-byte offset accounts for the
12010 value produced by adding to pc in the first instruction
12011 of the PLT stub. */
12012 got_displacement = got_address - (plt_address + 8);
12013
12014 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
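	      /* (Added note: the displacement is encoded below as an 8-bit,
	         an 8-bit and a 12-bit immediate -- masks 0x0ff00000,
	         0x000ff000 and 0x00000fff -- so only 28 bits can be
	         represented, hence the assertion above.)  */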
12015
12016 thumb_refs = eh->plt_thumb_refcount;
12017 if (!htab->use_blx)
12018 thumb_refs += eh->plt_maybe_thumb_refcount;
12019
12020 if (thumb_refs > 0)
12021 {
12022 put_thumb_insn (htab, output_bfd,
12023 elf32_arm_plt_thumb_stub[0], ptr - 4);
12024 put_thumb_insn (htab, output_bfd,
12025 elf32_arm_plt_thumb_stub[1], ptr - 2);
12026 }
12027
12028 put_arm_insn (htab, output_bfd,
12029 elf32_arm_plt_entry[0]
12030 | ((got_displacement & 0x0ff00000) >> 20),
12031 ptr + 0);
12032 put_arm_insn (htab, output_bfd,
12033 elf32_arm_plt_entry[1]
12034 | ((got_displacement & 0x000ff000) >> 12),
12035 			    ptr + 4);
12036 put_arm_insn (htab, output_bfd,
12037 elf32_arm_plt_entry[2]
12038 | (got_displacement & 0x00000fff),
12039 ptr + 8);
12040 #ifdef FOUR_WORD_PLT
12041 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12042 #endif
12043 }
12044
12045 /* Fill in the entry in the global offset table. */
12046 bfd_put_32 (output_bfd,
12047 (splt->output_section->vma
12048 + splt->output_offset),
12049 sgot->contents + got_offset);
12050
12051 /* Fill in the entry in the .rel(a).plt section. */
12052 rel.r_addend = 0;
12053 rel.r_offset = got_address;
12054 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12055 }
12056
12057 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12058 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12059
12060 if (!h->def_regular)
12061 {
12062 /* Mark the symbol as undefined, rather than as defined in
12063 the .plt section. Leave the value alone. */
12064 sym->st_shndx = SHN_UNDEF;
12065 /* If the symbol is weak, we do need to clear the value.
12066 Otherwise, the PLT entry would provide a definition for
12067 the symbol even if the symbol wasn't defined anywhere,
12068 and so the symbol would never be NULL. */
12069 if (!h->ref_regular_nonweak)
12070 sym->st_value = 0;
12071 }
12072 }
12073
12074 if (h->got.offset != (bfd_vma) -1
12075 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12076 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12077 {
12078 asection * sgot;
12079 asection * srel;
12080 Elf_Internal_Rela rel;
12081 bfd_byte *loc;
12082 bfd_vma offset;
12083
12084 /* This symbol has an entry in the global offset table. Set it
12085 up. */
12086 sgot = bfd_get_section_by_name (dynobj, ".got");
12087 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12088 BFD_ASSERT (sgot != NULL && srel != NULL);
12089
12090 offset = (h->got.offset & ~(bfd_vma) 1);
12091 rel.r_addend = 0;
12092 rel.r_offset = (sgot->output_section->vma
12093 + sgot->output_offset
12094 + offset);
12095
12096 /* If this is a static link, or it is a -Bsymbolic link and the
12097 symbol is defined locally or was forced to be local because
12098 of a version file, we just want to emit a RELATIVE reloc.
12099 The entry in the global offset table will already have been
12100 initialized in the relocate_section function. */
12101 if (info->shared
12102 && SYMBOL_REFERENCES_LOCAL (info, h))
12103 {
12104 BFD_ASSERT ((h->got.offset & 1) != 0);
12105 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12106 if (!htab->use_rel)
12107 {
12108 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12109 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12110 }
12111 }
12112 else
12113 {
12114 BFD_ASSERT ((h->got.offset & 1) == 0);
12115 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12116 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12117 }
12118
12119 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12120 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12121 }
12122
12123 if (h->needs_copy)
12124 {
12125 asection * s;
12126 Elf_Internal_Rela rel;
12127 bfd_byte *loc;
12128
12129 /* This symbol needs a copy reloc. Set it up. */
12130 BFD_ASSERT (h->dynindx != -1
12131 && (h->root.type == bfd_link_hash_defined
12132 || h->root.type == bfd_link_hash_defweak));
12133
12134 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12135 RELOC_SECTION (htab, ".bss"));
12136 BFD_ASSERT (s != NULL);
12137
12138 rel.r_addend = 0;
12139 rel.r_offset = (h->root.u.def.value
12140 + h->root.u.def.section->output_section->vma
12141 + h->root.u.def.section->output_offset);
12142 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12143 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12144 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12145 }
12146
12147 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12148 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12149 to the ".got" section. */
12150 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12151 || (!htab->vxworks_p && h == htab->root.hgot))
12152 sym->st_shndx = SHN_ABS;
12153
12154 return TRUE;
12155 }
12156
12157 /* Finish up the dynamic sections. */
12158
12159 static bfd_boolean
12160 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12161 {
12162 bfd * dynobj;
12163 asection * sgot;
12164 asection * sdyn;
12165
12166 dynobj = elf_hash_table (info)->dynobj;
12167
12168 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12169 BFD_ASSERT (elf32_arm_hash_table (info)->symbian_p || sgot != NULL);
12170 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12171
12172 if (elf_hash_table (info)->dynamic_sections_created)
12173 {
12174 asection *splt;
12175 Elf32_External_Dyn *dyncon, *dynconend;
12176 struct elf32_arm_link_hash_table *htab;
12177
12178 htab = elf32_arm_hash_table (info);
12179 splt = bfd_get_section_by_name (dynobj, ".plt");
12180 BFD_ASSERT (splt != NULL && sdyn != NULL);
12181
12182 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12183 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12184
12185 for (; dyncon < dynconend; dyncon++)
12186 {
12187 Elf_Internal_Dyn dyn;
12188 const char * name;
12189 asection * s;
12190
12191 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12192
12193 switch (dyn.d_tag)
12194 {
12195 unsigned int type;
12196
12197 default:
12198 if (htab->vxworks_p
12199 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12200 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12201 break;
12202
12203 case DT_HASH:
12204 name = ".hash";
12205 goto get_vma_if_bpabi;
12206 case DT_STRTAB:
12207 name = ".dynstr";
12208 goto get_vma_if_bpabi;
12209 case DT_SYMTAB:
12210 name = ".dynsym";
12211 goto get_vma_if_bpabi;
12212 case DT_VERSYM:
12213 name = ".gnu.version";
12214 goto get_vma_if_bpabi;
12215 case DT_VERDEF:
12216 name = ".gnu.version_d";
12217 goto get_vma_if_bpabi;
12218 case DT_VERNEED:
12219 name = ".gnu.version_r";
12220 goto get_vma_if_bpabi;
12221
12222 case DT_PLTGOT:
12223 name = ".got";
12224 goto get_vma;
12225 case DT_JMPREL:
12226 name = RELOC_SECTION (htab, ".plt");
12227 get_vma:
12228 s = bfd_get_section_by_name (output_bfd, name);
12229 BFD_ASSERT (s != NULL);
12230 if (!htab->symbian_p)
12231 dyn.d_un.d_ptr = s->vma;
12232 else
12233 /* In the BPABI, tags in the PT_DYNAMIC section point
12234 at the file offset, not the memory address, for the
12235 convenience of the post linker. */
12236 dyn.d_un.d_ptr = s->filepos;
12237 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12238 break;
12239
12240 get_vma_if_bpabi:
12241 if (htab->symbian_p)
12242 goto get_vma;
12243 break;
12244
12245 case DT_PLTRELSZ:
12246 s = bfd_get_section_by_name (output_bfd,
12247 RELOC_SECTION (htab, ".plt"));
12248 BFD_ASSERT (s != NULL);
12249 dyn.d_un.d_val = s->size;
12250 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12251 break;
12252
12253 case DT_RELSZ:
12254 case DT_RELASZ:
12255 if (!htab->symbian_p)
12256 {
12257 /* My reading of the SVR4 ABI indicates that the
12258 procedure linkage table relocs (DT_JMPREL) should be
12259 included in the overall relocs (DT_REL). This is
12260 		 what Solaris does.  However, UnixWare cannot handle
12261 that case. Therefore, we override the DT_RELSZ entry
12262 here to make it not include the JMPREL relocs. Since
12263 the linker script arranges for .rel(a).plt to follow all
12264 other relocation sections, we don't have to worry
12265 about changing the DT_REL entry. */
12266 s = bfd_get_section_by_name (output_bfd,
12267 RELOC_SECTION (htab, ".plt"));
12268 if (s != NULL)
12269 dyn.d_un.d_val -= s->size;
12270 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12271 break;
12272 }
12273 /* Fall through. */
12274
12275 case DT_REL:
12276 case DT_RELA:
12277 /* In the BPABI, the DT_REL tag must point at the file
12278 offset, not the VMA, of the first relocation
12279 section. So, we use code similar to that in
12280 elflink.c, but do not check for SHF_ALLOC on the
12281 	     relocation section, since relocation sections are
12282 	     never allocated under the BPABI.  The comments above
12283 	     about UnixWare notwithstanding, we include all of the
12284 relocations here. */
12285 if (htab->symbian_p)
12286 {
12287 unsigned int i;
12288 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12289 ? SHT_REL : SHT_RELA);
12290 dyn.d_un.d_val = 0;
12291 for (i = 1; i < elf_numsections (output_bfd); i++)
12292 {
12293 Elf_Internal_Shdr *hdr
12294 = elf_elfsections (output_bfd)[i];
12295 if (hdr->sh_type == type)
12296 {
12297 if (dyn.d_tag == DT_RELSZ
12298 || dyn.d_tag == DT_RELASZ)
12299 dyn.d_un.d_val += hdr->sh_size;
12300 else if ((ufile_ptr) hdr->sh_offset
12301 <= dyn.d_un.d_val - 1)
12302 dyn.d_un.d_val = hdr->sh_offset;
12303 }
12304 }
12305 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12306 }
12307 break;
12308
12309 /* Set the bottom bit of DT_INIT/FINI if the
12310 corresponding function is Thumb. */
12311 case DT_INIT:
12312 name = info->init_function;
12313 goto get_sym;
12314 case DT_FINI:
12315 name = info->fini_function;
12316 get_sym:
12317 /* If it wasn't set by elf_bfd_final_link
12318 then there is nothing to adjust. */
12319 if (dyn.d_un.d_val != 0)
12320 {
12321 struct elf_link_hash_entry * eh;
12322
12323 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12324 FALSE, FALSE, TRUE);
12325 if (eh != NULL
12326 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12327 {
12328 dyn.d_un.d_val |= 1;
12329 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12330 }
12331 }
12332 break;
12333 }
12334 }
12335
12336 /* Fill in the first entry in the procedure linkage table. */
12337 if (splt->size > 0 && elf32_arm_hash_table (info)->plt_header_size)
12338 {
12339 const bfd_vma *plt0_entry;
12340 bfd_vma got_address, plt_address, got_displacement;
12341
12342 /* Calculate the addresses of the GOT and PLT. */
12343 got_address = sgot->output_section->vma + sgot->output_offset;
12344 plt_address = splt->output_section->vma + splt->output_offset;
12345
12346 if (htab->vxworks_p)
12347 {
12348 /* The VxWorks GOT is relocated by the dynamic linker.
12349 Therefore, we must emit relocations rather than simply
12350 computing the values now. */
12351 Elf_Internal_Rela rel;
12352
12353 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12354 put_arm_insn (htab, output_bfd, plt0_entry[0],
12355 splt->contents + 0);
12356 put_arm_insn (htab, output_bfd, plt0_entry[1],
12357 splt->contents + 4);
12358 put_arm_insn (htab, output_bfd, plt0_entry[2],
12359 splt->contents + 8);
12360 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12361
12362 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12363 rel.r_offset = plt_address + 12;
12364 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12365 rel.r_addend = 0;
12366 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12367 htab->srelplt2->contents);
12368 }
12369 else
12370 {
12371 got_displacement = got_address - (plt_address + 16);
12372
12373 plt0_entry = elf32_arm_plt0_entry;
12374 put_arm_insn (htab, output_bfd, plt0_entry[0],
12375 splt->contents + 0);
12376 put_arm_insn (htab, output_bfd, plt0_entry[1],
12377 splt->contents + 4);
12378 put_arm_insn (htab, output_bfd, plt0_entry[2],
12379 splt->contents + 8);
12380 put_arm_insn (htab, output_bfd, plt0_entry[3],
12381 splt->contents + 12);
12382
12383 #ifdef FOUR_WORD_PLT
12384 /* The displacement value goes in the otherwise-unused
12385 last word of the second entry. */
12386 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12387 #else
12388 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12389 #endif
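	      /* (Added note: this displacement word is what the PLT header
	         instructions in elf32_arm_plt0_entry presumably consume to
	         locate the GOT at run time; only its position differs
	         between the four-word and default layouts.)  */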
12390 }
12391 }
12392
12393 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12394 really seem like the right value. */
12395 if (splt->output_section->owner == output_bfd)
12396 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12397
12398 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12399 {
12400 /* Correct the .rel(a).plt.unloaded relocations. They will have
12401 incorrect symbol indexes. */
12402 int num_plts;
12403 unsigned char *p;
12404
12405 num_plts = ((htab->splt->size - htab->plt_header_size)
12406 / htab->plt_entry_size);
12407 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12408
12409 for (; num_plts; num_plts--)
12410 {
12411 Elf_Internal_Rela rel;
12412
12413 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12414 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12415 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12416 p += RELOC_SIZE (htab);
12417
12418 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12419 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12420 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12421 p += RELOC_SIZE (htab);
12422 }
12423 }
12424 }
12425
12426 /* Fill in the first three entries in the global offset table. */
12427 if (sgot)
12428 {
12429 if (sgot->size > 0)
12430 {
12431 if (sdyn == NULL)
12432 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12433 else
12434 bfd_put_32 (output_bfd,
12435 sdyn->output_section->vma + sdyn->output_offset,
12436 sgot->contents);
12437 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12438 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12439 }
12440
12441 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12442 }
12443
12444 return TRUE;
12445 }
12446
12447 static void
12448 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
12449 {
12450 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12451 struct elf32_arm_link_hash_table *globals;
12452
12453 i_ehdrp = elf_elfheader (abfd);
12454
12455 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12456 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12457 else
12458 i_ehdrp->e_ident[EI_OSABI] = 0;
12459 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12460
12461 if (link_info)
12462 {
12463 globals = elf32_arm_hash_table (link_info);
12464 if (globals->byteswap_code)
12465 i_ehdrp->e_flags |= EF_ARM_BE8;
12466 }
12467 }
12468
12469 static enum elf_reloc_type_class
12470 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12471 {
12472 switch ((int) ELF32_R_TYPE (rela->r_info))
12473 {
12474 case R_ARM_RELATIVE:
12475 return reloc_class_relative;
12476 case R_ARM_JUMP_SLOT:
12477 return reloc_class_plt;
12478 case R_ARM_COPY:
12479 return reloc_class_copy;
12480 default:
12481 return reloc_class_normal;
12482 }
12483 }
12484
12485 /* Set the right machine number for an Arm ELF file. */
12486
12487 static bfd_boolean
12488 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12489 {
12490 if (hdr->sh_type == SHT_NOTE)
12491 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12492
12493 return TRUE;
12494 }
12495
12496 static void
12497 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12498 {
12499 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12500 }
12501
12502 /* Return TRUE if this is an unwinding table entry. */
12503
12504 static bfd_boolean
12505 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12506 {
12507 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12508 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12509 }
12510
12511
12512 /* Set the type and flags for an ARM section.  We do this based on
12513    the section name, which is a hack, but it ought to work.  */
12514
12515 static bfd_boolean
12516 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12517 {
12518 const char * name;
12519
12520 name = bfd_get_section_name (abfd, sec);
12521
12522 if (is_arm_elf_unwind_section_name (abfd, name))
12523 {
12524 hdr->sh_type = SHT_ARM_EXIDX;
12525 hdr->sh_flags |= SHF_LINK_ORDER;
12526 }
12527 return TRUE;
12528 }
12529
12530 /* Handle an ARM specific section when reading an object file. This is
12531 called when bfd_section_from_shdr finds a section with an unknown
12532 type. */
12533
12534 static bfd_boolean
12535 elf32_arm_section_from_shdr (bfd *abfd,
12536 Elf_Internal_Shdr * hdr,
12537 const char *name,
12538 int shindex)
12539 {
12540 /* There ought to be a place to keep ELF backend specific flags, but
12541 at the moment there isn't one. We just keep track of the
12542 sections by their name, instead. Fortunately, the ABI gives
12543 names for all the ARM specific sections, so we will probably get
12544 away with this. */
12545 switch (hdr->sh_type)
12546 {
12547 case SHT_ARM_EXIDX:
12548 case SHT_ARM_PREEMPTMAP:
12549 case SHT_ARM_ATTRIBUTES:
12550 break;
12551
12552 default:
12553 return FALSE;
12554 }
12555
12556 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12557 return FALSE;
12558
12559 return TRUE;
12560 }
12561
12562 /* A structure used to record a list of sections, independently
12563 of the next and prev fields in the asection structure. */
12564 typedef struct section_list
12565 {
12566 asection * sec;
12567 struct section_list * next;
12568 struct section_list * prev;
12569 }
12570 section_list;
12571
12572 /* Unfortunately we need to keep a list of sections for which
12573 an _arm_elf_section_data structure has been allocated. This
12574 is because it is possible for functions like elf32_arm_write_section
12575 to be called on a section which has had an elf_data_structure
12576 allocated for it (and so the used_by_bfd field is valid) but
12577 for which the ARM extended version of this structure - the
12578 _arm_elf_section_data structure - has not been allocated. */
12579 static section_list * sections_with_arm_elf_section_data = NULL;
12580
12581 static void
12582 record_section_with_arm_elf_section_data (asection * sec)
12583 {
12584 struct section_list * entry;
12585
12586 entry = bfd_malloc (sizeof (* entry));
12587 if (entry == NULL)
12588 return;
12589 entry->sec = sec;
12590 entry->next = sections_with_arm_elf_section_data;
12591 entry->prev = NULL;
12592 if (entry->next != NULL)
12593 entry->next->prev = entry;
12594 sections_with_arm_elf_section_data = entry;
12595 }
12596
12597 static struct section_list *
12598 find_arm_elf_section_entry (asection * sec)
12599 {
12600 struct section_list * entry;
12601 static struct section_list * last_entry = NULL;
12602
12603   /* This is a shortcut for the typical case where the sections are added
12604 to the sections_with_arm_elf_section_data list in forward order and
12605 then looked up here in backwards order. This makes a real difference
12606 to the ld-srec/sec64k.exp linker test. */
12607 entry = sections_with_arm_elf_section_data;
12608 if (last_entry != NULL)
12609 {
12610 if (last_entry->sec == sec)
12611 entry = last_entry;
12612 else if (last_entry->next != NULL
12613 && last_entry->next->sec == sec)
12614 entry = last_entry->next;
12615 }
12616
12617 for (; entry; entry = entry->next)
12618 if (entry->sec == sec)
12619 break;
12620
12621 if (entry)
12622 /* Record the entry prior to this one - it is the entry we are most
12623 likely to want to locate next time. Also this way if we have been
12624 called from unrecord_section_with_arm_elf_section_data() we will not
12625 be caching a pointer that is about to be freed. */
12626 last_entry = entry->prev;
12627
12628 return entry;
12629 }
12630
12631 static _arm_elf_section_data *
12632 get_arm_elf_section_data (asection * sec)
12633 {
12634 struct section_list * entry;
12635
12636 entry = find_arm_elf_section_entry (sec);
12637
12638 if (entry)
12639 return elf32_arm_section_data (entry->sec);
12640 else
12641 return NULL;
12642 }
12643
12644 static void
12645 unrecord_section_with_arm_elf_section_data (asection * sec)
12646 {
12647 struct section_list * entry;
12648
12649 entry = find_arm_elf_section_entry (sec);
12650
12651 if (entry)
12652 {
12653 if (entry->prev != NULL)
12654 entry->prev->next = entry->next;
12655 if (entry->next != NULL)
12656 entry->next->prev = entry->prev;
12657 if (entry == sections_with_arm_elf_section_data)
12658 sections_with_arm_elf_section_data = entry->next;
12659 free (entry);
12660 }
12661 }
12662
12663
12664 typedef struct
12665 {
12666 void *finfo;
12667 struct bfd_link_info *info;
12668 asection *sec;
12669 int sec_shndx;
12670 int (*func) (void *, const char *, Elf_Internal_Sym *,
12671 asection *, struct elf_link_hash_entry *);
12672 } output_arch_syminfo;
12673
12674 enum map_symbol_type
12675 {
12676 ARM_MAP_ARM,
12677 ARM_MAP_THUMB,
12678 ARM_MAP_DATA
12679 };
12680
12681
12682 /* Output a single mapping symbol. */
12683
12684 static bfd_boolean
12685 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12686 enum map_symbol_type type,
12687 bfd_vma offset)
12688 {
12689 static const char *names[3] = {"$a", "$t", "$d"};
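  /* (Added note: these are the ARM ELF mapping symbols -- $a marks ARM
     code, $t Thumb code and $d data.)  */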
12690 struct elf32_arm_link_hash_table *htab;
12691 Elf_Internal_Sym sym;
12692
12693 htab = elf32_arm_hash_table (osi->info);
12694 sym.st_value = osi->sec->output_section->vma
12695 + osi->sec->output_offset
12696 + offset;
12697 sym.st_size = 0;
12698 sym.st_other = 0;
12699 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12700 sym.st_shndx = osi->sec_shndx;
12701 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12702 }
12703
12704
12705 /* Output mapping symbols for PLT entries associated with H. */
12706
12707 static bfd_boolean
12708 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12709 {
12710 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12711 struct elf32_arm_link_hash_table *htab;
12712 struct elf32_arm_link_hash_entry *eh;
12713 bfd_vma addr;
12714
12715 htab = elf32_arm_hash_table (osi->info);
12716
12717 if (h->root.type == bfd_link_hash_indirect)
12718 return TRUE;
12719
12720 if (h->root.type == bfd_link_hash_warning)
12721 /* When warning symbols are created, they **replace** the "real"
12722 entry in the hash table, thus we never get to see the real
12723 symbol in a hash traversal. So look at it now. */
12724 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12725
12726 if (h->plt.offset == (bfd_vma) -1)
12727 return TRUE;
12728
12729 eh = (struct elf32_arm_link_hash_entry *) h;
12730 addr = h->plt.offset;
12731 if (htab->symbian_p)
12732 {
12733 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12734 return FALSE;
12735 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12736 return FALSE;
12737 }
12738 else if (htab->vxworks_p)
12739 {
12740 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12741 return FALSE;
12742 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12743 return FALSE;
12744 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12745 return FALSE;
12746 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12747 return FALSE;
12748 }
12749 else
12750 {
12751 bfd_signed_vma thumb_refs;
12752
12753 thumb_refs = eh->plt_thumb_refcount;
12754 if (!htab->use_blx)
12755 thumb_refs += eh->plt_maybe_thumb_refcount;
12756
12757 if (thumb_refs > 0)
12758 {
12759 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12760 return FALSE;
12761 }
12762 #ifdef FOUR_WORD_PLT
12763 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12764 return FALSE;
12765 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12766 return FALSE;
12767 #else
12768       /* A three-word PLT with no Thumb thunk contains only ARM code, so
12769 	 we only need to output a mapping symbol for the first PLT entry and
12770 	 for entries with Thumb thunks.  */
12771 if (thumb_refs > 0 || addr == 20)
12772 {
12773 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12774 return FALSE;
12775 }
12776 #endif
12777 }
12778
12779 return TRUE;
12780 }
12781
12782 /* Output a single local symbol for a generated stub. */
12783
12784 static bfd_boolean
12785 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12786 bfd_vma offset, bfd_vma size)
12787 {
12788 struct elf32_arm_link_hash_table *htab;
12789 Elf_Internal_Sym sym;
12790
12791 htab = elf32_arm_hash_table (osi->info);
12792 sym.st_value = osi->sec->output_section->vma
12793 + osi->sec->output_offset
12794 + offset;
12795 sym.st_size = size;
12796 sym.st_other = 0;
12797 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12798 sym.st_shndx = osi->sec_shndx;
12799 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12800 }
12801
12802 static bfd_boolean
12803 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12804 void * in_arg)
12805 {
12806 struct elf32_arm_stub_hash_entry *stub_entry;
12807 struct bfd_link_info *info;
12808 struct elf32_arm_link_hash_table *htab;
12809 asection *stub_sec;
12810 bfd_vma addr;
12811 char *stub_name;
12812 output_arch_syminfo *osi;
12813 const insn_sequence *template;
12814 enum stub_insn_type prev_type;
12815 int size;
12816 int i;
12817 enum map_symbol_type sym_type;
12818
12819 /* Massage our args to the form they really have. */
12820 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12821 osi = (output_arch_syminfo *) in_arg;
12822
12823 info = osi->info;
12824
12825 htab = elf32_arm_hash_table (info);
12826 stub_sec = stub_entry->stub_sec;
12827
12828 /* Ensure this stub is attached to the current section being
12829 processed. */
12830 if (stub_sec != osi->sec)
12831 return TRUE;
12832
12833 addr = (bfd_vma) stub_entry->stub_offset;
12834 stub_name = stub_entry->output_name;
12835
12836 template = stub_entry->stub_template;
12837 switch (template[0].type)
12838 {
12839 case ARM_TYPE:
12840 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12841 return FALSE;
12842 break;
12843 case THUMB16_TYPE:
12844 case THUMB32_TYPE:
12845 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12846 stub_entry->stub_size))
12847 return FALSE;
12848 break;
12849 default:
12850 BFD_FAIL ();
12851 return 0;
12852 }
12853
12854 prev_type = DATA_TYPE;
12855 size = 0;
12856 for (i = 0; i < stub_entry->stub_template_size; i++)
12857 {
12858 switch (template[i].type)
12859 {
12860 case ARM_TYPE:
12861 sym_type = ARM_MAP_ARM;
12862 break;
12863
12864 case THUMB16_TYPE:
12865 case THUMB32_TYPE:
12866 sym_type = ARM_MAP_THUMB;
12867 break;
12868
12869 case DATA_TYPE:
12870 sym_type = ARM_MAP_DATA;
12871 break;
12872
12873 default:
12874 BFD_FAIL ();
12875 return FALSE;
12876 }
12877
12878 if (template[i].type != prev_type)
12879 {
12880 prev_type = template[i].type;
12881 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12882 return FALSE;
12883 }
12884
12885 switch (template[i].type)
12886 {
12887 case ARM_TYPE:
12888 case THUMB32_TYPE:
12889 size += 4;
12890 break;
12891
12892 case THUMB16_TYPE:
12893 size += 2;
12894 break;
12895
12896 case DATA_TYPE:
12897 size += 4;
12898 break;
12899
12900 default:
12901 BFD_FAIL ();
12902 return FALSE;
12903 }
12904 }
12905
12906 return TRUE;
12907 }
12908
12909 /* Output mapping symbols for linker generated sections. */
12910
12911 static bfd_boolean
12912 elf32_arm_output_arch_local_syms (bfd *output_bfd,
12913 struct bfd_link_info *info,
12914 void *finfo,
12915 int (*func) (void *, const char *,
12916 Elf_Internal_Sym *,
12917 asection *,
12918 struct elf_link_hash_entry *))
12919 {
12920 output_arch_syminfo osi;
12921 struct elf32_arm_link_hash_table *htab;
12922 bfd_vma offset;
12923 bfd_size_type size;
12924
12925 htab = elf32_arm_hash_table (info);
12926 check_use_blx (htab);
12927
12928 osi.finfo = finfo;
12929 osi.info = info;
12930 osi.func = func;
12931
12932 /* ARM->Thumb glue. */
12933 if (htab->arm_glue_size > 0)
12934 {
12935 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12936 ARM2THUMB_GLUE_SECTION_NAME);
12937
12938 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12939 (output_bfd, osi.sec->output_section);
12940 if (info->shared || htab->root.is_relocatable_executable
12941 || htab->pic_veneer)
12942 size = ARM2THUMB_PIC_GLUE_SIZE;
12943 else if (htab->use_blx)
12944 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
12945 else
12946 size = ARM2THUMB_STATIC_GLUE_SIZE;
12947
12948 for (offset = 0; offset < htab->arm_glue_size; offset += size)
12949 {
12950 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
12951 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
12952 }
12953 }
12954
12955 /* Thumb->ARM glue. */
12956 if (htab->thumb_glue_size > 0)
12957 {
12958 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12959 THUMB2ARM_GLUE_SECTION_NAME);
12960
12961 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12962 (output_bfd, osi.sec->output_section);
12963 size = THUMB2ARM_GLUE_SIZE;
12964
12965 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
12966 {
12967 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
12968 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
12969 }
12970 }
12971
12972 /* ARMv4 BX veneers. */
12973 if (htab->bx_glue_size > 0)
12974 {
12975 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
12976 ARM_BX_GLUE_SECTION_NAME);
12977
12978 osi.sec_shndx = _bfd_elf_section_from_bfd_section
12979 (output_bfd, osi.sec->output_section);
12980
12981 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
12982 }
12983
12984 /* Long call stubs. */
12985 if (htab->stub_bfd && htab->stub_bfd->sections)
12986 {
12987 asection* stub_sec;
12988
12989 for (stub_sec = htab->stub_bfd->sections;
12990 stub_sec != NULL;
12991 stub_sec = stub_sec->next)
12992 {
12993 /* Ignore non-stub sections. */
12994 if (!strstr (stub_sec->name, STUB_SUFFIX))
12995 continue;
12996
12997 osi.sec = stub_sec;
12998
12999 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13000 (output_bfd, osi.sec->output_section);
13001
13002 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13003 }
13004 }
13005
13006 /* Finally, output mapping symbols for the PLT. */
13007 if (!htab->splt || htab->splt->size == 0)
13008 return TRUE;
13009
13010 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13011 htab->splt->output_section);
13012 osi.sec = htab->splt;
13013 /* Output mapping symbols for the plt header. SymbianOS does not have a
13014 plt header. */
13015 if (htab->vxworks_p)
13016 {
13017 /* VxWorks shared libraries have no PLT header. */
13018 if (!info->shared)
13019 {
13020 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13021 return FALSE;
13022 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13023 return FALSE;
13024 }
13025 }
13026 else if (!htab->symbian_p)
13027 {
13028 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13029 return FALSE;
13030 #ifndef FOUR_WORD_PLT
13031 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13032 return FALSE;
13033 #endif
13034 }
13035
13036 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13037 return TRUE;
13038 }
13039
13040 /* Allocate target specific section data. */
13041
13042 static bfd_boolean
13043 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13044 {
13045 if (!sec->used_by_bfd)
13046 {
13047 _arm_elf_section_data *sdata;
13048 bfd_size_type amt = sizeof (*sdata);
13049
13050 sdata = bfd_zalloc (abfd, amt);
13051 if (sdata == NULL)
13052 return FALSE;
13053 sec->used_by_bfd = sdata;
13054 }
13055
13056 record_section_with_arm_elf_section_data (sec);
13057
13058 return _bfd_elf_new_section_hook (abfd, sec);
13059 }
13060
13061
13062 /* Used to order a list of mapping symbols by address. */
13063
13064 static int
13065 elf32_arm_compare_mapping (const void * a, const void * b)
13066 {
13067 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13068 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13069
13070 if (amap->vma > bmap->vma)
13071 return 1;
13072 else if (amap->vma < bmap->vma)
13073 return -1;
13074 else if (amap->type > bmap->type)
13075 /* Ensure results do not depend on the host qsort for objects with
13076 multiple mapping symbols at the same address by sorting on type
13077 after vma. */
13078 return 1;
13079 else if (amap->type < bmap->type)
13080 return -1;
13081 else
13082 return 0;
13083 }
13084
13085 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
13086
13087 static unsigned long
13088 offset_prel31 (unsigned long addr, bfd_vma offset)
13089 {
13090 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13091 }
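
/* For example, with ADDR = 0x7ffffffc (a PREL31 value of -4, bit 31
   clear) and OFFSET = 8, the result keeps bit 31 clear and holds +4 in
   its low 31 bits: the field remains a valid PREL31 offset while any
   flag in the top bit of ADDR is preserved.  */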
13092
13093 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13094 relocations. */
13095
13096 static void
13097 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13098 {
13099 unsigned long first_word = bfd_get_32 (output_bfd, from);
13100 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13101
13102 /* High bit of first word is supposed to be zero. */
13103 if ((first_word & 0x80000000ul) == 0)
13104 first_word = offset_prel31 (first_word, offset);
13105
13106 /* If the high bit of the second word is clear, and the word is not 0x1
13107 (EXIDX_CANTUNWIND), it is a PREL31 offset to an .ARM.extab entry. */
13108 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13109 second_word = offset_prel31 (second_word, offset);
13110
13111 bfd_put_32 (output_bfd, first_word, to);
13112 bfd_put_32 (output_bfd, second_word, to + 4);
13113 }
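
/* Each .ARM.exidx entry is two words: a PREL31 offset to the function it
   covers, followed by either EXIDX_CANTUNWIND (1), an inline unwind
   descriptor (high bit set), or a PREL31 offset to an .ARM.extab entry
   (high bit clear).  Only that last form of second word needs adjusting
   above.  */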
13114
13115 /* Data for make_branch_to_a8_stub(). */
13116
13117 struct a8_branch_to_stub_data {
13118 asection *writing_section;
13119 bfd_byte *contents;
13120 };
13121
13122
13123 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13124 places for a particular section. */
13125
13126 static bfd_boolean
13127 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13128 void *in_arg)
13129 {
13130 struct elf32_arm_stub_hash_entry *stub_entry;
13131 struct a8_branch_to_stub_data *data;
13132 bfd_byte *contents;
13133 unsigned long branch_insn;
13134 bfd_vma veneered_insn_loc, veneer_entry_loc;
13135 bfd_signed_vma branch_offset;
13136 bfd *abfd;
13137 unsigned int index;
13138
13139 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13140 data = (struct a8_branch_to_stub_data *) in_arg;
13141
13142 if (stub_entry->target_section != data->writing_section
13143 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13144 return TRUE;
13145
13146 contents = data->contents;
13147
13148 veneered_insn_loc = stub_entry->target_section->output_section->vma
13149 + stub_entry->target_section->output_offset
13150 + stub_entry->target_value;
13151
13152 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13153 + stub_entry->stub_sec->output_offset
13154 + stub_entry->stub_offset;
13155
13156 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13157 veneered_insn_loc &= ~3u;
13158
13159 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
13160
13161 abfd = stub_entry->target_section->owner;
13162 index = stub_entry->target_value;
13163
13164 /* We attempt to avoid this condition by setting stubs_always_after_branch
13165 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13166 This check is just to be on the safe side... */
13167 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13168 {
13169 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13170 "allocated in unsafe location"), abfd);
13171 return FALSE;
13172 }
13173
13174 switch (stub_entry->stub_type)
13175 {
13176 case arm_stub_a8_veneer_b:
13177 case arm_stub_a8_veneer_b_cond:
13178 branch_insn = 0xf0009000;
13179 goto jump24;
13180
13181 case arm_stub_a8_veneer_blx:
13182 branch_insn = 0xf000e800;
13183 goto jump24;
13184
13185 case arm_stub_a8_veneer_bl:
13186 {
13187 unsigned int i1, j1, i2, j2, s;
13188
13189 branch_insn = 0xf000d000;
13190
13191 jump24:
13192 if (branch_offset < -16777216 || branch_offset > 16777214)
13193 {
13194 /* There's not much we can do apart from complain if this
13195 happens. */
13196 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13197 "of range (input file too large)"), abfd);
13198 return FALSE;
13199 }
13200
13201 /* i1 = not(j1 eor s), so:
13202 not i1 = j1 eor s
13203 j1 = (not i1) eor s. */
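/* The 24-bit Thumb-2 branch offset is encoded as S:I1:I2:imm10:imm11:'0',
   with I1 = NOT(J1 EOR S) and I2 = NOT(J2 EOR S).  Below, imm11 goes in
   bits 0-10, J2 in bit 11, J1 in bit 13, imm10 in bits 16-25 and S in
   bit 26 of the combined 32-bit instruction word, which gives the
   roughly +/-16MB range checked above.  */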
13204
13205 branch_insn |= (branch_offset >> 1) & 0x7ff;
13206 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13207 i2 = (branch_offset >> 22) & 1;
13208 i1 = (branch_offset >> 23) & 1;
13209 s = (branch_offset >> 24) & 1;
13210 j1 = (!i1) ^ s;
13211 j2 = (!i2) ^ s;
13212 branch_insn |= j2 << 11;
13213 branch_insn |= j1 << 13;
13214 branch_insn |= s << 26;
13215 }
13216 break;
13217
13218 default:
13219 BFD_FAIL ();
13220 return FALSE;
13221 }
13222
13223 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[index]);
13224 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[index + 2]);
13225
13226 return TRUE;
13227 }
13228
13229 /* Do code byteswapping. Return FALSE afterwards so that the section is
13230 written out as normal. */
13231
13232 static bfd_boolean
13233 elf32_arm_write_section (bfd *output_bfd,
13234 struct bfd_link_info *link_info,
13235 asection *sec,
13236 bfd_byte *contents)
13237 {
13238 unsigned int mapcount, errcount;
13239 _arm_elf_section_data *arm_data;
13240 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13241 elf32_arm_section_map *map;
13242 elf32_vfp11_erratum_list *errnode;
13243 bfd_vma ptr;
13244 bfd_vma end;
13245 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13246 bfd_byte tmp;
13247 unsigned int i;
13248
13249 /* If this section has not been allocated an _arm_elf_section_data
13250 structure then we cannot record anything. */
13251 arm_data = get_arm_elf_section_data (sec);
13252 if (arm_data == NULL)
13253 return FALSE;
13254
13255 mapcount = arm_data->mapcount;
13256 map = arm_data->map;
13257 errcount = arm_data->erratumcount;
13258
13259 if (errcount != 0)
13260 {
13261 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
13262
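/* Each VFP11 erratum record is applied in one of two ways: a
   BRANCH_TO_ARM_VENEER record overwrites the offending VFP instruction
   with a branch (keeping its original condition code) to its veneer,
   while an ARM_VENEER record fills in the veneer itself with the
   original instruction followed by a branch back to the instruction
   after the patched one.  */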
13263 for (errnode = arm_data->erratumlist; errnode != 0;
13264 errnode = errnode->next)
13265 {
13266 bfd_vma index = errnode->vma - offset;
13267
13268 switch (errnode->type)
13269 {
13270 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13271 {
13272 bfd_vma branch_to_veneer;
13273 /* Original condition code of instruction, plus bit mask for
13274 ARM B instruction. */
13275 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13276 | 0x0a000000;
13277
13278 /* The instruction is before the label. */
13279 index -= 4;
13280
13281 /* The branch is 4 bytes before errnode->vma; with the ARM PC 8 bytes ahead of the branch, the net adjustment below is -4. */
13282 branch_to_veneer = errnode->u.b.veneer->vma
13283 - errnode->vma - 4;
13284
13285 if ((signed) branch_to_veneer < -(1 << 25)
13286 || (signed) branch_to_veneer >= (1 << 25))
13287 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13288 "range"), output_bfd);
13289
13290 insn |= (branch_to_veneer >> 2) & 0xffffff;
13291 contents[endianflip ^ index] = insn & 0xff;
13292 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13293 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13294 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13295 }
13296 break;
13297
13298 case VFP11_ERRATUM_ARM_VENEER:
13299 {
13300 bfd_vma branch_from_veneer;
13301 unsigned int insn;
13302
13303 /* Take size of veneer into account. */
13304 branch_from_veneer = errnode->u.v.branch->vma
13305 - errnode->vma - 12;
13306
13307 if ((signed) branch_from_veneer < -(1 << 25)
13308 || (signed) branch_from_veneer >= (1 << 25))
13309 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13310 "range"), output_bfd);
13311
13312 /* Original instruction. */
13313 insn = errnode->u.v.branch->u.b.vfp_insn;
13314 contents[endianflip ^ index] = insn & 0xff;
13315 contents[endianflip ^ (index + 1)] = (insn >> 8) & 0xff;
13316 contents[endianflip ^ (index + 2)] = (insn >> 16) & 0xff;
13317 contents[endianflip ^ (index + 3)] = (insn >> 24) & 0xff;
13318
13319 /* Branch back to insn after original insn. */
13320 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13321 contents[endianflip ^ (index + 4)] = insn & 0xff;
13322 contents[endianflip ^ (index + 5)] = (insn >> 8) & 0xff;
13323 contents[endianflip ^ (index + 6)] = (insn >> 16) & 0xff;
13324 contents[endianflip ^ (index + 7)] = (insn >> 24) & 0xff;
13325 }
13326 break;
13327
13328 default:
13329 abort ();
13330 }
13331 }
13332 }
13333
13334 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13335 {
13336 arm_unwind_table_edit *edit_node
13337 = arm_data->u.exidx.unwind_edit_list;
13338 /* Now, sec->size is the size of the section we will write. The original
13339 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13340 markers) was sec->rawsize. (If we performed no edits, rawsize will be
13341 zero and we should use size instead.) */
13342 bfd_byte *edited_contents = bfd_malloc (sec->size);
13343 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13344 unsigned int in_index, out_index;
13345 bfd_vma add_to_offsets = 0;
13346
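/* add_to_offsets tracks how far later input entries move within the
   output: deleting an entry shifts subsequent entries down by 8 bytes,
   so their PREL31 offsets must grow by 8 to keep pointing at the same
   address, and inserting an EXIDX_CANTUNWIND terminator has the
   opposite effect.  */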
13347 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13348 {
13349 if (edit_node)
13350 {
13351 unsigned int edit_index = edit_node->index;
13352
13353 if (in_index < edit_index && in_index * 8 < input_size)
13354 {
13355 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13356 contents + in_index * 8, add_to_offsets);
13357 out_index++;
13358 in_index++;
13359 }
13360 else if (in_index == edit_index
13361 || (in_index * 8 >= input_size
13362 && edit_index == UINT_MAX))
13363 {
13364 switch (edit_node->type)
13365 {
13366 case DELETE_EXIDX_ENTRY:
13367 in_index++;
13368 add_to_offsets += 8;
13369 break;
13370
13371 case INSERT_EXIDX_CANTUNWIND_AT_END:
13372 {
13373 asection *text_sec = edit_node->linked_section;
13374 bfd_vma text_offset = text_sec->output_section->vma
13375 + text_sec->output_offset
13376 + text_sec->size;
13377 bfd_vma exidx_offset = offset + out_index * 8;
13378 unsigned long prel31_offset;
13379
13380 /* Note: this is meant to be equivalent to an
13381 R_ARM_PREL31 relocation. These synthetic
13382 EXIDX_CANTUNWIND markers are not relocated by the
13383 usual BFD method. */
13384 prel31_offset = (text_offset - exidx_offset)
13385 & 0x7ffffffful;
13386
13387 /* First address we can't unwind. */
13388 bfd_put_32 (output_bfd, prel31_offset,
13389 &edited_contents[out_index * 8]);
13390
13391 /* Code for EXIDX_CANTUNWIND. */
13392 bfd_put_32 (output_bfd, 0x1,
13393 &edited_contents[out_index * 8 + 4]);
13394
13395 out_index++;
13396 add_to_offsets -= 8;
13397 }
13398 break;
13399 }
13400
13401 edit_node = edit_node->next;
13402 }
13403 }
13404 else
13405 {
13406 /* No more edits, copy remaining entries verbatim. */
13407 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13408 contents + in_index * 8, add_to_offsets);
13409 out_index++;
13410 in_index++;
13411 }
13412 }
13413
13414 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13415 bfd_set_section_contents (output_bfd, sec->output_section,
13416 edited_contents,
13417 (file_ptr) sec->output_offset, sec->size);
13418
13419 return TRUE;
13420 }
13421
13422 /* Fix code to point to Cortex-A8 erratum stubs. */
13423 if (globals->fix_cortex_a8)
13424 {
13425 struct a8_branch_to_stub_data data;
13426
13427 data.writing_section = sec;
13428 data.contents = contents;
13429
13430 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13431 &data);
13432 }
13433
13434 if (mapcount == 0)
13435 return FALSE;
13436
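/* When byteswapping code, the sorted mapping symbols tell us which parts
   of the section hold ARM code ('a', swapped as 32-bit words), Thumb
   code ('t', swapped as 16-bit halfwords) and literal data ('d', left
   untouched).  */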
13437 if (globals->byteswap_code)
13438 {
13439 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
13440
13441 ptr = map[0].vma;
13442 for (i = 0; i < mapcount; i++)
13443 {
13444 if (i == mapcount - 1)
13445 end = sec->size;
13446 else
13447 end = map[i + 1].vma;
13448
13449 switch (map[i].type)
13450 {
13451 case 'a':
13452 /* Byte swap code words. */
13453 while (ptr + 3 < end)
13454 {
13455 tmp = contents[ptr];
13456 contents[ptr] = contents[ptr + 3];
13457 contents[ptr + 3] = tmp;
13458 tmp = contents[ptr + 1];
13459 contents[ptr + 1] = contents[ptr + 2];
13460 contents[ptr + 2] = tmp;
13461 ptr += 4;
13462 }
13463 break;
13464
13465 case 't':
13466 /* Byte swap code halfwords. */
13467 while (ptr + 1 < end)
13468 {
13469 tmp = contents[ptr];
13470 contents[ptr] = contents[ptr + 1];
13471 contents[ptr + 1] = tmp;
13472 ptr += 2;
13473 }
13474 break;
13475
13476 case 'd':
13477 /* Leave data alone. */
13478 break;
13479 }
13480 ptr = end;
13481 }
13482 }
13483
13484 free (map);
13485 arm_data->mapcount = 0;
13486 arm_data->mapsize = 0;
13487 arm_data->map = NULL;
13488 unrecord_section_with_arm_elf_section_data (sec);
13489
13490 return FALSE;
13491 }
13492
13493 static void
13494 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
13495 asection * sec,
13496 void * ignore ATTRIBUTE_UNUSED)
13497 {
13498 unrecord_section_with_arm_elf_section_data (sec);
13499 }
13500
13501 static bfd_boolean
13502 elf32_arm_close_and_cleanup (bfd * abfd)
13503 {
13504 if (abfd->sections)
13505 bfd_map_over_sections (abfd,
13506 unrecord_section_via_map_over_sections,
13507 NULL);
13508
13509 return _bfd_elf_close_and_cleanup (abfd);
13510 }
13511
13512 static bfd_boolean
13513 elf32_arm_bfd_free_cached_info (bfd * abfd)
13514 {
13515 if (abfd->sections)
13516 bfd_map_over_sections (abfd,
13517 unrecord_section_via_map_over_sections,
13518 NULL);
13519
13520 return _bfd_free_cached_info (abfd);
13521 }
13522
13523 /* Display STT_ARM_TFUNC symbols as functions. */
13524
13525 static void
13526 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
13527 asymbol *asym)
13528 {
13529 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13530
13531 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13532 elfsym->symbol.flags |= BSF_FUNCTION;
13533 }
13534
13535
13536 /* Mangle thumb function symbols as we read them in. */
13537
13538 static bfd_boolean
13539 elf32_arm_swap_symbol_in (bfd * abfd,
13540 const void *psrc,
13541 const void *pshn,
13542 Elf_Internal_Sym *dst)
13543 {
13544 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13545 return FALSE;
13546
13547 /* New EABI objects mark thumb function symbols by setting the low bit of
13548 the address. Turn these into STT_ARM_TFUNC. */
13549 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13550 && (dst->st_value & 1))
13551 {
13552 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13553 dst->st_value &= ~(bfd_vma) 1;
13554 }
13555 return TRUE;
13556 }
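
/* For example, an STT_FUNC symbol with value 0x8001 read from an EABI
   object becomes an STT_ARM_TFUNC symbol with value 0x8000; the low bit
   only encodes the Thumb state and is not part of the address.  */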
13557
13558
13559 /* Mangle thumb function symbols as we write them out. */
13560
13561 static void
13562 elf32_arm_swap_symbol_out (bfd *abfd,
13563 const Elf_Internal_Sym *src,
13564 void *cdst,
13565 void *shndx)
13566 {
13567 Elf_Internal_Sym newsym;
13568
13569 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13570 of the address set, as per the new EABI. We do this unconditionally
13571 because objcopy does not set the elf header flags until after
13572 it writes out the symbol table. */
13573 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13574 {
13575 newsym = *src;
13576 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13577 if (newsym.st_shndx != SHN_UNDEF)
13578 {
13579 /* Do this only for defined symbols. At link time, the static
13580 linker simulates the work of the dynamic linker by resolving
13581 symbols, and carries over the thumbness of the resolved symbols
13582 to the output symbol table. It is not entirely clear how this
13583 happens, but the thumbness of an undefined symbol may well be
13584 different at runtime, and writing '1' for it would be confusing
13585 for users and possibly for the dynamic linker itself.
13586 */
13587 newsym.st_value |= 1;
13588 }
13589
13590 src = &newsym;
13591 }
13592 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13593 }
13594
13595 /* Add the PT_ARM_EXIDX program header. */
13596
13597 static bfd_boolean
13598 elf32_arm_modify_segment_map (bfd *abfd,
13599 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13600 {
13601 struct elf_segment_map *m;
13602 asection *sec;
13603
13604 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13605 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13606 {
13607 /* If there is already a PT_ARM_EXIDX header, then we do not
13608 want to add another one. This situation arises when running
13609 "strip"; the input binary already has the header. */
13610 m = elf_tdata (abfd)->segment_map;
13611 while (m && m->p_type != PT_ARM_EXIDX)
13612 m = m->next;
13613 if (!m)
13614 {
13615 m = bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13616 if (m == NULL)
13617 return FALSE;
13618 m->p_type = PT_ARM_EXIDX;
13619 m->count = 1;
13620 m->sections[0] = sec;
13621
13622 m->next = elf_tdata (abfd)->segment_map;
13623 elf_tdata (abfd)->segment_map = m;
13624 }
13625 }
13626
13627 return TRUE;
13628 }
13629
13630 /* We may add a PT_ARM_EXIDX program header. */
13631
13632 static int
13633 elf32_arm_additional_program_headers (bfd *abfd,
13634 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13635 {
13636 asection *sec;
13637
13638 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13639 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13640 return 1;
13641 else
13642 return 0;
13643 }
13644
13645 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13646
13647 static bfd_boolean
13648 elf32_arm_is_function_type (unsigned int type)
13649 {
13650 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13651 }
13652
13653 /* We use this to override swap_symbol_in and swap_symbol_out. */
13654 const struct elf_size_info elf32_arm_size_info =
13655 {
13656 sizeof (Elf32_External_Ehdr),
13657 sizeof (Elf32_External_Phdr),
13658 sizeof (Elf32_External_Shdr),
13659 sizeof (Elf32_External_Rel),
13660 sizeof (Elf32_External_Rela),
13661 sizeof (Elf32_External_Sym),
13662 sizeof (Elf32_External_Dyn),
13663 sizeof (Elf_External_Note),
13664 4,
13665 1,
13666 32, 2,
13667 ELFCLASS32, EV_CURRENT,
13668 bfd_elf32_write_out_phdrs,
13669 bfd_elf32_write_shdrs_and_ehdr,
13670 bfd_elf32_checksum_contents,
13671 bfd_elf32_write_relocs,
13672 elf32_arm_swap_symbol_in,
13673 elf32_arm_swap_symbol_out,
13674 bfd_elf32_slurp_reloc_table,
13675 bfd_elf32_slurp_symbol_table,
13676 bfd_elf32_swap_dyn_in,
13677 bfd_elf32_swap_dyn_out,
13678 bfd_elf32_swap_reloc_in,
13679 bfd_elf32_swap_reloc_out,
13680 bfd_elf32_swap_reloca_in,
13681 bfd_elf32_swap_reloca_out
13682 };
13683
13684 #define ELF_ARCH bfd_arch_arm
13685 #define ELF_MACHINE_CODE EM_ARM
13686 #ifdef __QNXTARGET__
13687 #define ELF_MAXPAGESIZE 0x1000
13688 #else
13689 #define ELF_MAXPAGESIZE 0x8000
13690 #endif
13691 #define ELF_MINPAGESIZE 0x1000
13692 #define ELF_COMMONPAGESIZE 0x1000
13693
13694 #define bfd_elf32_mkobject elf32_arm_mkobject
13695
13696 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13697 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13698 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13699 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13700 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13701 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13702 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13703 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13704 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13705 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13706 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13707 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13708 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13709 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13710 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13711
13712 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13713 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13714 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13715 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13716 #define elf_backend_check_relocs elf32_arm_check_relocs
13717 #define elf_backend_relocate_section elf32_arm_relocate_section
13718 #define elf_backend_write_section elf32_arm_write_section
13719 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13720 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13721 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13722 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13723 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13724 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13725 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13726 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13727 #define elf_backend_object_p elf32_arm_object_p
13728 #define elf_backend_section_flags elf32_arm_section_flags
13729 #define elf_backend_fake_sections elf32_arm_fake_sections
13730 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13731 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13732 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13733 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13734 #define elf_backend_size_info elf32_arm_size_info
13735 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13736 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13737 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13738 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13739 #define elf_backend_is_function_type elf32_arm_is_function_type
13740
13741 #define elf_backend_can_refcount 1
13742 #define elf_backend_can_gc_sections 1
13743 #define elf_backend_plt_readonly 1
13744 #define elf_backend_want_got_plt 1
13745 #define elf_backend_want_plt_sym 0
13746 #define elf_backend_may_use_rel_p 1
13747 #define elf_backend_may_use_rela_p 0
13748 #define elf_backend_default_use_rela_p 0
13749
13750 #define elf_backend_got_header_size 12
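/* The 12-byte GOT header corresponds to the three reserved entries
   GOT[0]-GOT[2]: GOT[0] normally holds the address of the dynamic
   section, and GOT[1]/GOT[2] are filled in by the dynamic linker.  */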
13751
13752 #undef elf_backend_obj_attrs_vendor
13753 #define elf_backend_obj_attrs_vendor "aeabi"
13754 #undef elf_backend_obj_attrs_section
13755 #define elf_backend_obj_attrs_section ".ARM.attributes"
13756 #undef elf_backend_obj_attrs_arg_type
13757 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13758 #undef elf_backend_obj_attrs_section_type
13759 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13760 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13761
13762 #include "elf32-target.h"
13763
13764 /* VxWorks Targets. */
13765
13766 #undef TARGET_LITTLE_SYM
13767 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13768 #undef TARGET_LITTLE_NAME
13769 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13770 #undef TARGET_BIG_SYM
13771 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13772 #undef TARGET_BIG_NAME
13773 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13774
13775 /* Like elf32_arm_link_hash_table_create -- but overrides
13776 appropriately for VxWorks. */
13777
13778 static struct bfd_link_hash_table *
13779 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13780 {
13781 struct bfd_link_hash_table *ret;
13782
13783 ret = elf32_arm_link_hash_table_create (abfd);
13784 if (ret)
13785 {
13786 struct elf32_arm_link_hash_table *htab
13787 = (struct elf32_arm_link_hash_table *) ret;
13788 htab->use_rel = 0;
13789 htab->vxworks_p = 1;
13790 }
13791 return ret;
13792 }
13793
13794 static void
13795 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13796 {
13797 elf32_arm_final_write_processing (abfd, linker);
13798 elf_vxworks_final_write_processing (abfd, linker);
13799 }
13800
13801 #undef elf32_bed
13802 #define elf32_bed elf32_arm_vxworks_bed
13803
13804 #undef bfd_elf32_bfd_link_hash_table_create
13805 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13806 #undef elf_backend_add_symbol_hook
13807 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13808 #undef elf_backend_final_write_processing
13809 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13810 #undef elf_backend_emit_relocs
13811 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13812
13813 #undef elf_backend_may_use_rel_p
13814 #define elf_backend_may_use_rel_p 0
13815 #undef elf_backend_may_use_rela_p
13816 #define elf_backend_may_use_rela_p 1
13817 #undef elf_backend_default_use_rela_p
13818 #define elf_backend_default_use_rela_p 1
13819 #undef elf_backend_want_plt_sym
13820 #define elf_backend_want_plt_sym 1
13821 #undef ELF_MAXPAGESIZE
13822 #define ELF_MAXPAGESIZE 0x1000
13823
13824 #include "elf32-target.h"
13825
13826
13827 /* Symbian OS Targets. */
13828
13829 #undef TARGET_LITTLE_SYM
13830 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
13831 #undef TARGET_LITTLE_NAME
13832 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
13833 #undef TARGET_BIG_SYM
13834 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
13835 #undef TARGET_BIG_NAME
13836 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
13837
13838 /* Like elf32_arm_link_hash_table_create -- but overrides
13839 appropriately for Symbian OS. */
13840
13841 static struct bfd_link_hash_table *
13842 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
13843 {
13844 struct bfd_link_hash_table *ret;
13845
13846 ret = elf32_arm_link_hash_table_create (abfd);
13847 if (ret)
13848 {
13849 struct elf32_arm_link_hash_table *htab
13850 = (struct elf32_arm_link_hash_table *)ret;
13851 /* There is no PLT header for Symbian OS. */
13852 htab->plt_header_size = 0;
13853 /* The PLT entries are each one instruction and one word. */
13854 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
13855 htab->symbian_p = 1;
13856 /* Symbian uses armv5t or above, so use_blx is always true. */
13857 htab->use_blx = 1;
13858 htab->root.is_relocatable_executable = 1;
13859 }
13860 return ret;
13861 }
13862
13863 static const struct bfd_elf_special_section
13864 elf32_arm_symbian_special_sections[] =
13865 {
13866 /* In a BPABI executable, the dynamic linking sections do not go in
13867 the loadable read-only segment. The post-linker may wish to
13868 refer to these sections, but they are not part of the final
13869 program image. */
13870 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
13871 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
13872 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
13873 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
13874 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
13875 /* These sections do not need to be writable as the SymbianOS
13876 postlinker will arrange things so that no dynamic relocation is
13877 required. */
13878 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
13879 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
13880 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
13881 { NULL, 0, 0, 0, 0 }
13882 };
13883
13884 static void
13885 elf32_arm_symbian_begin_write_processing (bfd *abfd,
13886 struct bfd_link_info *link_info)
13887 {
13888 /* BPABI objects are never loaded directly by an OS kernel; they are
13889 processed by a postlinker first, into an OS-specific format. If
13890 the D_PAGED bit is set on the file, BFD will align segments on
13891 page boundaries, so that an OS can directly map the file. With
13892 BPABI objects, that just results in wasted space. In addition,
13893 because we clear the D_PAGED bit, map_sections_to_segments will
13894 recognize that the program headers should not be mapped into any
13895 loadable segment. */
13896 abfd->flags &= ~D_PAGED;
13897 elf32_arm_begin_write_processing (abfd, link_info);
13898 }
13899
13900 static bfd_boolean
13901 elf32_arm_symbian_modify_segment_map (bfd *abfd,
13902 struct bfd_link_info *info)
13903 {
13904 struct elf_segment_map *m;
13905 asection *dynsec;
13906
13907 /* BPABI shared libraries and executables should have a PT_DYNAMIC
13908 segment. However, because the .dynamic section is not marked
13909 with SEC_LOAD, the generic ELF code will not create such a
13910 segment. */
13911 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
13912 if (dynsec)
13913 {
13914 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
13915 if (m->p_type == PT_DYNAMIC)
13916 break;
13917
13918 if (m == NULL)
13919 {
13920 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
13921 m->next = elf_tdata (abfd)->segment_map;
13922 elf_tdata (abfd)->segment_map = m;
13923 }
13924 }
13925
13926 /* Also call the generic arm routine. */
13927 return elf32_arm_modify_segment_map (abfd, info);
13928 }
13929
13930 /* Return address for Ith PLT stub in section PLT, for relocation REL
13931 or (bfd_vma) -1 if it should not be included. */
13932
13933 static bfd_vma
13934 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
13935 const arelent *rel ATTRIBUTE_UNUSED)
13936 {
13937 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
13938 }
13939
13940
13941 #undef elf32_bed
13942 #define elf32_bed elf32_arm_symbian_bed
13943
13944 /* The dynamic sections are not allocated on SymbianOS; the postlinker
13945 will process them and then discard them. */
13946 #undef ELF_DYNAMIC_SEC_FLAGS
13947 #define ELF_DYNAMIC_SEC_FLAGS \
13948 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
13949
13950 #undef elf_backend_add_symbol_hook
13951 #undef elf_backend_emit_relocs
13952
13953 #undef bfd_elf32_bfd_link_hash_table_create
13954 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
13955 #undef elf_backend_special_sections
13956 #define elf_backend_special_sections elf32_arm_symbian_special_sections
13957 #undef elf_backend_begin_write_processing
13958 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
13959 #undef elf_backend_final_write_processing
13960 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13961
13962 #undef elf_backend_modify_segment_map
13963 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
13964
13965 /* There is no .got section for BPABI objects, and hence no header. */
13966 #undef elf_backend_got_header_size
13967 #define elf_backend_got_header_size 0
13968
13969 /* Similarly, there is no .got.plt section. */
13970 #undef elf_backend_want_got_plt
13971 #define elf_backend_want_got_plt 0
13972
13973 #undef elf_backend_plt_sym_val
13974 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
13975
13976 #undef elf_backend_may_use_rel_p
13977 #define elf_backend_may_use_rel_p 1
13978 #undef elf_backend_may_use_rela_p
13979 #define elf_backend_may_use_rela_p 0
13980 #undef elf_backend_default_use_rela_p
13981 #define elf_backend_default_use_rela_p 0
13982 #undef elf_backend_want_plt_sym
13983 #define elf_backend_want_plt_sym 0
13984 #undef ELF_MAXPAGESIZE
13985 #define ELF_MAXPAGESIZE 0x8000
13986
13987 #include "elf32-target.h"