1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "sysdep.h"
23 #include <limits.h>
24
25 #include "bfd.h"
26 #include "bfd_stdint.h"
27 #include "libiberty.h"
28 #include "libbfd.h"
29 #include "elf-bfd.h"
30 #include "elf-nacl.h"
31 #include "elf-vxworks.h"
32 #include "elf/arm.h"
33
34 /* Return the relocation section associated with NAME. HTAB is the
35 bfd's elf32_arm_link_hash_table. */
36 #define RELOC_SECTION(HTAB, NAME) \
37 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38
39 /* Return size of a relocation entry. HTAB is the bfd's
40 elf32_arm_link_hash_table. */
41 #define RELOC_SIZE(HTAB) \
42 ((HTAB)->use_rel \
43 ? sizeof (Elf32_External_Rel) \
44 : sizeof (Elf32_External_Rela))
45
46 /* Return function to swap relocations in. HTAB is the bfd's
47 elf32_arm_link_hash_table. */
48 #define SWAP_RELOC_IN(HTAB) \
49 ((HTAB)->use_rel \
50 ? bfd_elf32_swap_reloc_in \
51 : bfd_elf32_swap_reloca_in)
52
53 /* Return function to swap relocations out. HTAB is the bfd's
54 elf32_arm_link_hash_table. */
55 #define SWAP_RELOC_OUT(HTAB) \
56 ((HTAB)->use_rel \
57 ? bfd_elf32_swap_reloc_out \
58 : bfd_elf32_swap_reloca_out)
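/* Editorial usage sketch (not part of the original file): the four macros
   above let the rest of this backend stay agnostic about REL vs. RELA.
   Assuming a local `htab' of type struct elf32_arm_link_hash_table *, plus
   `dynobj', `output_bfd' and a filled-in Elf_Internal_Rela `outrel' (all
   names assumed here), a dynamic relocation is typically emitted as:

     asection *srel = bfd_get_linker_section (dynobj,
                                               RELOC_SECTION (htab, ".text"));
     bfd_byte *loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);

   With use_rel set this resolves to ".rel.text", Elf32_External_Rel and
   bfd_elf32_swap_reloc_out; otherwise to the RELA equivalents.  */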
59
60 #define elf_info_to_howto 0
61 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62
63 #define ARM_ELF_ABI_VERSION 0
64 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65
66 /* The Adjusted Place, as defined by AAELF. */
67 #define Pa(X) ((X) & 0xfffffffc)
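/* Editorial example: Pa simply clears the two low bits, so for instance
   Pa (0x8F76) == 0x8F74 and Pa (0x8F74) == 0x8F74.  It is used below where
   AAELF defines a result relative to the Adjusted Place rather than the
   place itself, e.g. when an interworking branch is resolved as BLX, whose
   target must be word aligned.  */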
68
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 0, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 FALSE, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 FALSE), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 TRUE, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 FALSE, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 TRUE, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 TRUE, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 FALSE, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 FALSE, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 FALSE, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 FALSE, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 FALSE, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 TRUE, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 TRUE, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 FALSE, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 FALSE, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 FALSE, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 TRUE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 TRUE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 FALSE, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 FALSE, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 TRUE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 TRUE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 TRUE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 TRUE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 FALSE, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 TRUE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 TRUE, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 TRUE, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 FALSE, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 FALSE, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 FALSE, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 FALSE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 FALSE, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 TRUE, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 FALSE, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 FALSE, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 TRUE, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 TRUE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 FALSE, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 TRUE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 TRUE, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. +4095 (relative to the base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 TRUE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
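/* Editorial worked example for the note above: with a computed value of -3
   an ADDW immediate has no room for the sign, so the usual fix-up is to
   flip the instruction to SUBW and encode the magnitude instead, roughly

     if (value < 0)
       {
         insn = (insn & ~ADDW_OPCODE_MASK) | SUBW_OPCODE_BITS;
         value = -value;
       }

   where ADDW_OPCODE_MASK and SUBW_OPCODE_BITS are placeholder names used
   here for illustration only; the real rewrite happens later in this file
   when the relocation is applied.  */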
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 FALSE, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 TRUE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 TRUE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 TRUE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 TRUE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 TRUE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 TRUE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 TRUE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 TRUE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 TRUE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 TRUE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 TRUE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 TRUE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 TRUE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 TRUE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 TRUE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 TRUE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 TRUE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 TRUE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 TRUE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 TRUE, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 TRUE, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 TRUE, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 TRUE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 TRUE, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 FALSE, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 FALSE, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 FALSE, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 FALSE, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 FALSE, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 FALSE, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_bitfield,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 FALSE, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 TRUE, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 FALSE, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 FALSE, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 TRUE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 TRUE, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 FALSE, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 bfd_elf_generic_reloc, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 FALSE, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 FALSE, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 FALSE, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 FALSE, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_bitfield,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1693 };
1694
1695 /* 160 onwards: */
1696 static reloc_howto_type elf32_arm_howto_table_2[1] =
1697 {
1698 HOWTO (R_ARM_IRELATIVE, /* type */
1699 0, /* rightshift */
1700 2, /* size (0 = byte, 1 = short, 2 = long) */
1701 32, /* bitsize */
1702 FALSE, /* pc_relative */
1703 0, /* bitpos */
1704 complain_overflow_bitfield,/* complain_on_overflow */
1705 bfd_elf_generic_reloc, /* special_function */
1706 "R_ARM_IRELATIVE", /* name */
1707 TRUE, /* partial_inplace */
1708 0xffffffff, /* src_mask */
1709 0xffffffff, /* dst_mask */
1710 FALSE) /* pcrel_offset */
1711 };
1712
1713 /* 252-255 extended, currently unused, relocations (R_ARM_RREL32 .. R_ARM_RBASE): */
1714 static reloc_howto_type elf32_arm_howto_table_3[4] =
1715 {
1716 HOWTO (R_ARM_RREL32, /* type */
1717 0, /* rightshift */
1718 0, /* size (0 = byte, 1 = short, 2 = long) */
1719 0, /* bitsize */
1720 FALSE, /* pc_relative */
1721 0, /* bitpos */
1722 complain_overflow_dont,/* complain_on_overflow */
1723 bfd_elf_generic_reloc, /* special_function */
1724 "R_ARM_RREL32", /* name */
1725 FALSE, /* partial_inplace */
1726 0, /* src_mask */
1727 0, /* dst_mask */
1728 FALSE), /* pcrel_offset */
1729
1730 HOWTO (R_ARM_RABS32, /* type */
1731 0, /* rightshift */
1732 0, /* size (0 = byte, 1 = short, 2 = long) */
1733 0, /* bitsize */
1734 FALSE, /* pc_relative */
1735 0, /* bitpos */
1736 complain_overflow_dont,/* complain_on_overflow */
1737 bfd_elf_generic_reloc, /* special_function */
1738 "R_ARM_RABS32", /* name */
1739 FALSE, /* partial_inplace */
1740 0, /* src_mask */
1741 0, /* dst_mask */
1742 FALSE), /* pcrel_offset */
1743
1744 HOWTO (R_ARM_RPC24, /* type */
1745 0, /* rightshift */
1746 0, /* size (0 = byte, 1 = short, 2 = long) */
1747 0, /* bitsize */
1748 FALSE, /* pc_relative */
1749 0, /* bitpos */
1750 complain_overflow_dont,/* complain_on_overflow */
1751 bfd_elf_generic_reloc, /* special_function */
1752 "R_ARM_RPC24", /* name */
1753 FALSE, /* partial_inplace */
1754 0, /* src_mask */
1755 0, /* dst_mask */
1756 FALSE), /* pcrel_offset */
1757
1758 HOWTO (R_ARM_RBASE, /* type */
1759 0, /* rightshift */
1760 0, /* size (0 = byte, 1 = short, 2 = long) */
1761 0, /* bitsize */
1762 FALSE, /* pc_relative */
1763 0, /* bitpos */
1764 complain_overflow_dont,/* complain_on_overflow */
1765 bfd_elf_generic_reloc, /* special_function */
1766 "R_ARM_RBASE", /* name */
1767 FALSE, /* partial_inplace */
1768 0, /* src_mask */
1769 0, /* dst_mask */
1770 FALSE) /* pcrel_offset */
1771 };
1772
1773 static reloc_howto_type *
1774 elf32_arm_howto_from_type (unsigned int r_type)
1775 {
1776 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1777 return &elf32_arm_howto_table_1[r_type];
1778
1779 if (r_type == R_ARM_IRELATIVE)
1780 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1781
1782 if (r_type >= R_ARM_RREL32
1783 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1784 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1785
1786 return NULL;
1787 }
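
/* An illustrative, non-compiled sketch of how the three howto tables above
   partition the relocation number space: values below
   ARRAY_SIZE (elf32_arm_howto_table_1) index the first table directly,
   R_ARM_IRELATIVE selects the single-entry second table, and the extended
   R_ARM_RREL32..R_ARM_RBASE range selects the third.  The helper name is
   hypothetical.  */
#if 0
static void
example_dump_howto_names (void)
{
  static const unsigned int sample_types[] =
    { R_ARM_NONE, R_ARM_PC24, R_ARM_ABS32, R_ARM_IRELATIVE, R_ARM_RBASE };
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (sample_types); i++)
    {
      reloc_howto_type *howto = elf32_arm_howto_from_type (sample_types[i]);

      /* Each supported type resolves to the howto that carries its name.  */
      printf ("r_type %u -> %s\n", sample_types[i],
              howto != NULL ? howto->name : "(no howto)");
    }
}
#endif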
1788
1789 static void
1790 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1791 Elf_Internal_Rela * elf_reloc)
1792 {
1793 unsigned int r_type;
1794
1795 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1796 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1797 }
1798
1799 struct elf32_arm_reloc_map
1800 {
1801 bfd_reloc_code_real_type bfd_reloc_val;
1802 unsigned char elf_reloc_val;
1803 };
1804
1805 /* All entries in this list must also be present in one of the elf32_arm_howto_table arrays above.  */
1806 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1807 {
1808 {BFD_RELOC_NONE, R_ARM_NONE},
1809 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1810 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1811 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1812 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1813 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1814 {BFD_RELOC_32, R_ARM_ABS32},
1815 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1816 {BFD_RELOC_8, R_ARM_ABS8},
1817 {BFD_RELOC_16, R_ARM_ABS16},
1818 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1819 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1825 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1826 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1827 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1828 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1829 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1830 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1831 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1832 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1833 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1834 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1835 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1836 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1837 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1838 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1840 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1841 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1842 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1843 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1845 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1846 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1847 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1848 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1849 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1850 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1851 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1852 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1853 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1854 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1855 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1856 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1857 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1858 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1859 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1860 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1861 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1862 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1863 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1864 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1865 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1866 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1867 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1868 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1869 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1870 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1871 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1872 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1873 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1874 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1875 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1876 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1877 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1878 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1879 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1880 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1881 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1882 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1883 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1884 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1885 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1886 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1887 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1888 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1889 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1890 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1891 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1892 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1893 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1894 };
1895
1896 static reloc_howto_type *
1897 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1898 bfd_reloc_code_real_type code)
1899 {
1900 unsigned int i;
1901
1902 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1903 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1904 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1905
1906 return NULL;
1907 }
1908
1909 static reloc_howto_type *
1910 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1911 const char *r_name)
1912 {
1913 unsigned int i;
1914
1915 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1916 if (elf32_arm_howto_table_1[i].name != NULL
1917 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1918 return &elf32_arm_howto_table_1[i];
1919
1920 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1921 if (elf32_arm_howto_table_2[i].name != NULL
1922 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1923 return &elf32_arm_howto_table_2[i];
1924
1925 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1926 if (elf32_arm_howto_table_3[i].name != NULL
1927 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1928 return &elf32_arm_howto_table_3[i];
1929
1930 return NULL;
1931 }
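
/* An illustrative sketch (not the real BFD frontend) of the two lookup
   directions provided above: by BFD relocation code and by relocation
   name.  Both calls below should resolve BFD_RELOC_32 / "R_ARM_ABS32"
   to the same howto entry.  The helper name is hypothetical.  */
#if 0
static void
example_lookup_abs32 (bfd *abfd)
{
  reloc_howto_type *by_code
    = elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_32);
  reloc_howto_type *by_name
    = elf32_arm_reloc_name_lookup (abfd, "R_ARM_ABS32");

  /* The map sends BFD_RELOC_32 to R_ARM_ABS32, so both pointers refer
     to the same slot of elf32_arm_howto_table_1.  */
  BFD_ASSERT (by_code != NULL && by_code == by_name);
}
#endif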
1932
1933 /* Support for core dump NOTE sections. */
1934
1935 static bfd_boolean
1936 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1937 {
1938 int offset;
1939 size_t size;
1940
1941 switch (note->descsz)
1942 {
1943 default:
1944 return FALSE;
1945
1946 case 148: /* Linux/ARM 32-bit. */
1947 /* pr_cursig */
1948 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1949
1950 /* pr_pid */
1951 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1952
1953 /* pr_reg */
1954 offset = 72;
1955 size = 72;
1956
1957 break;
1958 }
1959
1960 /* Make a ".reg/999" section. */
1961 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1962 size, note->descpos + offset);
1963 }
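
/* An illustrative sketch of the note layout assumed above for a 148-byte
   Linux/ARM NT_PRSTATUS descriptor: pr_cursig at offset 12, pr_pid at
   offset 24, and a 72-byte general-register block at offset 72 (18
   registers of 4 bytes: r0-r15, cpsr and the original r0).  The names are
   hypothetical and exist only for documentation.  */
#if 0
enum
{
  example_prstatus_descsz  = 148,	/* Total descriptor size.  */
  example_pr_cursig_offset = 12,	/* 16-bit current signal.  */
  example_pr_pid_offset    = 24,	/* 32-bit LWP id.  */
  example_pr_reg_offset    = 72,	/* Start of the register block.  */
  example_pr_reg_size      = 72		/* 18 registers * 4 bytes.  */
};
#endif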
1964
1965 static bfd_boolean
1966 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1967 {
1968 switch (note->descsz)
1969 {
1970 default:
1971 return FALSE;
1972
1973 case 124: /* Linux/ARM elf_prpsinfo. */
1974 elf_tdata (abfd)->core_pid
1975 = bfd_get_32 (abfd, note->descdata + 12);
1976 elf_tdata (abfd)->core_program
1977 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1978 elf_tdata (abfd)->core_command
1979 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1980 }
1981
1982 /* Note that some implementations (at least one, anyway) tack a
1983 spurious space onto the end of the args, so strip it off if it
1984 exists.  */
1985 {
1986 char *command = elf_tdata (abfd)->core_command;
1987 int n = strlen (command);
1988
1989 if (0 < n && command[n - 1] == ' ')
1990 command[n - 1] = '\0';
1991 }
1992
1993 return TRUE;
1994 }
1995
1996 static char *
1997 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
1998 int note_type, ...)
1999 {
2000 switch (note_type)
2001 {
2002 default:
2003 return NULL;
2004
2005 case NT_PRPSINFO:
2006 {
2007 char data[124];
2008 va_list ap;
2009
2010 va_start (ap, note_type);
2011 memset (data, 0, sizeof (data));
2012 strncpy (data + 28, va_arg (ap, const char *), 16);
2013 strncpy (data + 44, va_arg (ap, const char *), 80);
2014 va_end (ap);
2015
2016 return elfcore_write_note (abfd, buf, bufsiz,
2017 "CORE", note_type, data, sizeof (data));
2018 }
2019
2020 case NT_PRSTATUS:
2021 {
2022 char data[148];
2023 va_list ap;
2024 long pid;
2025 int cursig;
2026 const void *greg;
2027
2028 va_start (ap, note_type);
2029 memset (data, 0, sizeof (data));
2030 pid = va_arg (ap, long);
2031 bfd_put_32 (abfd, pid, data + 24);
2032 cursig = va_arg (ap, int);
2033 bfd_put_16 (abfd, cursig, data + 12);
2034 greg = va_arg (ap, const void *);
2035 memcpy (data + 72, greg, 72);
2036 va_end (ap);
2037
2038 return elfcore_write_note (abfd, buf, bufsiz,
2039 "CORE", note_type, data, sizeof (data));
2040 }
2041 }
2042 }
2043
2044 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2045 #define TARGET_LITTLE_NAME "elf32-littlearm"
2046 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2047 #define TARGET_BIG_NAME "elf32-bigarm"
2048
2049 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2050 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2051 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2052
2053 typedef unsigned long int insn32;
2054 typedef unsigned short int insn16;
2055
2056 /* In lieu of proper flags, assume all EABIv4 or later objects are
2057 interworkable. */
2058 #define INTERWORK_FLAG(abfd) \
2059 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2060 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2061 || ((abfd)->flags & BFD_LINKER_CREATED))
2062
2063 /* The linker script knows the section names for placement.
2064 The entry_names are used to do simple name mangling on the stubs.
2065 Given a function name and its type, the stub can be found.  The
2066 name can be changed; the only requirement is that the %s be present.  */
2067 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2068 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2069
2070 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2071 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2072
2073 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2074 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2075
2076 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2077 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2078
2079 #define STUB_ENTRY_NAME "__%s_veneer"
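
/* An illustrative sketch of how the entry-name templates above are used:
   a symbol name (or, for the BX veneers, a register number) is substituted
   for the conversion specifier to form the glue/veneer symbol name.  The
   helper name and fixed-size buffer are hypothetical simplifications.  */
#if 0
static void
example_glue_names (const char *sym_name)
{
  char buf[128];

  sprintf (buf, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);	/* "__foo_from_arm" */
  sprintf (buf, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);	/* "__foo_from_thumb" */
  sprintf (buf, STUB_ENTRY_NAME, sym_name);		/* "__foo_veneer" */
  sprintf (buf, ARM_BX_GLUE_ENTRY_NAME, 3);		/* "__bx_r3" */
  (void) buf;
}
#endif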
2080
2081 /* The name of the dynamic interpreter. This is put in the .interp
2082 section. */
2083 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2084
2085 static const unsigned long tls_trampoline [] =
2086 {
2087 0xe08e0000, /* add r0, lr, r0 */
2088 0xe5901004, /* ldr r1, [r0,#4] */
2089 0xe12fff11, /* bx r1 */
2090 };
2091
2092 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2093 {
2094 0xe52d2004, /* push {r2} */
2095 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2096 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2097 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2098 0xe081100f, /* 2: add r1, pc */
2099 0xe12fff12, /* bx r2 */
2100 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2101 + dl_tlsdesc_lazy_resolver(GOT) */
2102 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2103 };
2104
2105 #ifdef FOUR_WORD_PLT
2106
2107 /* The first entry in a procedure linkage table looks like
2108 this. It is set up so that any shared library function that is
2109 called before the relocation has been set up calls the dynamic
2110 linker first. */
2111 static const bfd_vma elf32_arm_plt0_entry [] =
2112 {
2113 0xe52de004, /* str lr, [sp, #-4]! */
2114 0xe59fe010, /* ldr lr, [pc, #16] */
2115 0xe08fe00e, /* add lr, pc, lr */
2116 0xe5bef008, /* ldr pc, [lr, #8]! */
2117 };
2118
2119 /* Subsequent entries in a procedure linkage table look like
2120 this. */
2121 static const bfd_vma elf32_arm_plt_entry [] =
2122 {
2123 0xe28fc600, /* add ip, pc, #NN */
2124 0xe28cca00, /* add ip, ip, #NN */
2125 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2126 0x00000000, /* unused */
2127 };
2128
2129 #else
2130
2131 /* The first entry in a procedure linkage table looks like
2132 this. It is set up so that any shared library function that is
2133 called before the relocation has been set up calls the dynamic
2134 linker first. */
2135 static const bfd_vma elf32_arm_plt0_entry [] =
2136 {
2137 0xe52de004, /* str lr, [sp, #-4]! */
2138 0xe59fe004, /* ldr lr, [pc, #4] */
2139 0xe08fe00e, /* add lr, pc, lr */
2140 0xe5bef008, /* ldr pc, [lr, #8]! */
2141 0x00000000, /* &GOT[0] - . */
2142 };
2143
2144 /* Subsequent entries in a procedure linkage table look like
2145 this. */
2146 static const bfd_vma elf32_arm_plt_entry [] =
2147 {
2148 0xe28fc600, /* add ip, pc, #0xNN00000 */
2149 0xe28cca00, /* add ip, ip, #0xNN000 */
2150 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2151 };
2152
2153 #endif
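
/* An illustrative sketch of how a GOT displacement can be folded into the
   three-instruction PLT entry above: the top byte of a 28-bit displacement
   goes into the rotated immediate of the first ADD, the next byte into the
   second ADD, and the low 12 bits into the LDR offset.  The real fill-in
   code appears later in this file; this helper is hypothetical and assumes
   the displacement fits in 28 bits.  */
#if 0
static void
example_fill_plt_entry (bfd_vma got_displacement, bfd_vma insns[3])
{
  insns[0] = elf32_arm_plt_entry[0] | ((got_displacement & 0x0ff00000) >> 20);
  insns[1] = elf32_arm_plt_entry[1] | ((got_displacement & 0x000ff000) >> 12);
  insns[2] = elf32_arm_plt_entry[2] |  (got_displacement & 0x00000fff);
}
#endif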
2154
2155 /* The format of the first entry in the procedure linkage table
2156 for a VxWorks executable. */
2157 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2158 {
2159 0xe52dc008, /* str ip,[sp,#-8]! */
2160 0xe59fc000, /* ldr ip,[pc] */
2161 0xe59cf008, /* ldr pc,[ip,#8] */
2162 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2163 };
2164
2165 /* The format of subsequent entries in a VxWorks executable. */
2166 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2167 {
2168 0xe59fc000, /* ldr ip,[pc] */
2169 0xe59cf000, /* ldr pc,[ip] */
2170 0x00000000, /* .long @got */
2171 0xe59fc000, /* ldr ip,[pc] */
2172 0xea000000, /* b _PLT */
2173 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2174 };
2175
2176 /* The format of entries in a VxWorks shared library. */
2177 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2178 {
2179 0xe59fc000, /* ldr ip,[pc] */
2180 0xe79cf009, /* ldr pc,[ip,r9] */
2181 0x00000000, /* .long @got */
2182 0xe59fc000, /* ldr ip,[pc] */
2183 0xe599f008, /* ldr pc,[r9,#8] */
2184 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2185 };
2186
2187 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2188 #define PLT_THUMB_STUB_SIZE 4
2189 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2190 {
2191 0x4778, /* bx pc */
2192 0x46c0 /* nop */
2193 };
2194
2195 /* The entries in a PLT when using a DLL-based target with multiple
2196 address spaces. */
2197 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2198 {
2199 0xe51ff004, /* ldr pc, [pc, #-4] */
2200 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2201 };
2202
2203 /* The first entry in a procedure linkage table looks like
2204 this. It is set up so that any shared library function that is
2205 called before the relocation has been set up calls the dynamic
2206 linker first. */
2207 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2208 {
2209 /* First bundle: */
2210 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2211 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2212 0xe08cc00f, /* add ip, ip, pc */
2213 0xe52dc008, /* str ip, [sp, #-8]! */
2214 /* Second bundle: */
2215 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2216 0xe59cc000, /* ldr ip, [ip] */
2217 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2218 0xe12fff1c, /* bx ip */
2219 /* Third bundle: */
2220 0xe320f000, /* nop */
2221 0xe320f000, /* nop */
2222 0xe320f000, /* nop */
2223 /* .Lplt_tail: */
2224 0xe50dc004, /* str ip, [sp, #-4] */
2225 /* Fourth bundle: */
2226 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2227 0xe59cc000, /* ldr ip, [ip] */
2228 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2229 0xe12fff1c, /* bx ip */
2230 };
2231 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2232
2233 /* Subsequent entries in a procedure linkage table look like this. */
2234 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2235 {
2236 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2237 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2238 0xe08cc00f, /* add ip, ip, pc */
2239 0xea000000, /* b .Lplt_tail */
2240 };
2241
2242 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2243 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2244 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2245 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2246 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2247 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
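
/* An illustrative sketch (not the real stub-type selection) of the range
   test implied by the limits above: a direct ARM-state B/BL is usable only
   when the signed displacement from the branch to its destination lies
   within the backward/forward limits, otherwise a long-branch stub is
   needed.  The helper name is hypothetical.  */
#if 0
static bfd_boolean
example_arm_branch_in_range (bfd_signed_vma branch_offset)
{
  return (branch_offset <= (bfd_signed_vma) ARM_MAX_FWD_BRANCH_OFFSET
	  && branch_offset >= (bfd_signed_vma) ARM_MAX_BWD_BRANCH_OFFSET);
}
#endif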
2248
2249 enum stub_insn_type
2250 {
2251 THUMB16_TYPE = 1,
2252 THUMB32_TYPE,
2253 ARM_TYPE,
2254 DATA_TYPE
2255 };
2256
2257 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2258 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2259 is inserted in arm_build_one_stub(). */
2260 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2261 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2262 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2263 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2264 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2265 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2266
2267 typedef struct
2268 {
2269 bfd_vma data;
2270 enum stub_insn_type type;
2271 unsigned int r_type;
2272 int reloc_addend;
2273 } insn_sequence;
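
/* An illustrative sketch (not the real sizing routine) of how a stub's
   encoded size follows from its template: THUMB16 entries occupy two
   bytes, while THUMB32, ARM and DATA entries occupy four.  The helper
   name is hypothetical.  */
#if 0
static int
example_stub_template_size (const insn_sequence *template_sequence,
			    int template_size)
{
  int i, size = 0;

  for (i = 0; i < template_size; i++)
    size += (template_sequence[i].type == THUMB16_TYPE) ? 2 : 4;

  return size;
}
#endif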
2274
2275 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2276 to reach the stub if necessary. */
2277 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2278 {
2279 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2280 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2281 };
2282
2283 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2284 available. */
2285 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2286 {
2287 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2288 ARM_INSN (0xe12fff1c), /* bx ip */
2289 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2290 };
2291
2292 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2293 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2294 {
2295 THUMB16_INSN (0xb401), /* push {r0} */
2296 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2297 THUMB16_INSN (0x4684), /* mov ip, r0 */
2298 THUMB16_INSN (0xbc01), /* pop {r0} */
2299 THUMB16_INSN (0x4760), /* bx ip */
2300 THUMB16_INSN (0xbf00), /* nop */
2301 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2302 };
2303
2304 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2305 allowed. */
2306 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2307 {
2308 THUMB16_INSN (0x4778), /* bx pc */
2309 THUMB16_INSN (0x46c0), /* nop */
2310 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2311 ARM_INSN (0xe12fff1c), /* bx ip */
2312 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2313 };
2314
2315 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2316 available. */
2317 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2318 {
2319 THUMB16_INSN (0x4778), /* bx pc */
2320 THUMB16_INSN (0x46c0), /* nop */
2321 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2322 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2323 };
2324
2325 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2326 one, when the destination is close enough. */
2327 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2328 {
2329 THUMB16_INSN (0x4778), /* bx pc */
2330 THUMB16_INSN (0x46c0), /* nop */
2331 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2332 };
2333
2334 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2335 blx to reach the stub if necessary. */
2336 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2337 {
2338 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2339 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2340 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2341 };
2342
2343 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2344 blx to reach the stub if necessary. We can not add into pc;
2345 it is not guaranteed to mode switch (different in ARMv6 and
2346 ARMv7). */
2347 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2348 {
2349 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2350 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2351 ARM_INSN (0xe12fff1c), /* bx ip */
2352 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2353 };
2354
2355 /* V4T ARM -> Thumb long branch stub, PIC.  */
2356 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2357 {
2358 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2359 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2360 ARM_INSN (0xe12fff1c), /* bx ip */
2361 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2362 };
2363
2364 /* V4T Thumb -> ARM long branch stub, PIC. */
2365 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2366 {
2367 THUMB16_INSN (0x4778), /* bx pc */
2368 THUMB16_INSN (0x46c0), /* nop */
2369 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2370 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2371 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2372 };
2373
2374 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2375 architectures. */
2376 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2377 {
2378 THUMB16_INSN (0xb401), /* push {r0} */
2379 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2380 THUMB16_INSN (0x46fc), /* mov ip, pc */
2381 THUMB16_INSN (0x4484), /* add ip, r0 */
2382 THUMB16_INSN (0xbc01), /* pop {r0} */
2383 THUMB16_INSN (0x4760), /* bx ip */
2384 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2385 };
2386
2387 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2388 allowed. */
2389 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2390 {
2391 THUMB16_INSN (0x4778), /* bx pc */
2392 THUMB16_INSN (0x46c0), /* nop */
2393 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2394 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2395 ARM_INSN (0xe12fff1c), /* bx ip */
2396 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2397 };
2398
2399 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2400 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2401 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2402 {
2403 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2404 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2405 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2406 };
2407
2408 /* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2409 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2410 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2411 {
2412 THUMB16_INSN (0x4778), /* bx pc */
2413 THUMB16_INSN (0x46c0), /* nop */
2414 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2415 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2416 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2417 };
2418
2419 /* Cortex-A8 erratum-workaround stubs. */
2420
2421 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2422 can't use a conditional branch to reach this stub). */
2423
2424 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2425 {
2426 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2427 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2428 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2429 };
2430
2431 /* Stub used for b.w and bl.w instructions. */
2432
2433 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2434 {
2435 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2436 };
2437
2438 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2439 {
2440 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2441 };
2442
2443 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2444 instruction (which switches to ARM mode) to point to this stub. Jump to the
2445 real destination using an ARM-mode branch. */
2446
2447 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2448 {
2449 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2450 };
2451
2452 /* For each section group there can be a specially created linker section
2453 to hold the stubs for that group. The name of the stub section is based
2454 upon the name of another section within that group with the suffix below
2455 applied.
2456
2457 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2458 create what appeared to be a linker stub section when it actually
2459 contained user code/data. For example, consider this fragment:
2460
2461 const char * stubborn_problems[] = { "np" };
2462
2463 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2464 section called:
2465
2466 .data.rel.local.stubborn_problems
2467
2468 This then causes problems in elf32_arm_build_stubs() as it triggers:
2469
2470 // Ignore non-stub sections.
2471 if (!strstr (stub_sec->name, STUB_SUFFIX))
2472 continue;
2473
2474 And so the section would be ignored instead of being processed. Hence
2475 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2476 C identifier. */
2477 #define STUB_SUFFIX ".__stub"
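
/* An illustrative sketch of the PR 13049 point made above: the old ".stub"
   suffix is a substring of an ordinary -fdata-sections name, whereas the
   current ".__stub" suffix cannot occur in a name derived from a valid C
   identifier.  The helper name is hypothetical.  */
#if 0
static void
example_stub_suffix_check (void)
{
  const char *user_sec = ".data.rel.local.stubborn_problems";

  BFD_ASSERT (strstr (user_sec, ".stub") != NULL);	/* Old suffix: false match.  */
  BFD_ASSERT (strstr (user_sec, STUB_SUFFIX) == NULL);	/* New suffix: no match.  */
}
#endif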
2478
2479 /* One entry per long/short branch stub defined above. */
2480 #define DEF_STUBS \
2481 DEF_STUB(long_branch_any_any) \
2482 DEF_STUB(long_branch_v4t_arm_thumb) \
2483 DEF_STUB(long_branch_thumb_only) \
2484 DEF_STUB(long_branch_v4t_thumb_thumb) \
2485 DEF_STUB(long_branch_v4t_thumb_arm) \
2486 DEF_STUB(short_branch_v4t_thumb_arm) \
2487 DEF_STUB(long_branch_any_arm_pic) \
2488 DEF_STUB(long_branch_any_thumb_pic) \
2489 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2490 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2491 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2492 DEF_STUB(long_branch_thumb_only_pic) \
2493 DEF_STUB(long_branch_any_tls_pic) \
2494 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2495 DEF_STUB(a8_veneer_b_cond) \
2496 DEF_STUB(a8_veneer_b) \
2497 DEF_STUB(a8_veneer_bl) \
2498 DEF_STUB(a8_veneer_blx)
2499
2500 #define DEF_STUB(x) arm_stub_##x,
2501 enum elf32_arm_stub_type
2502 {
2503 arm_stub_none,
2504 DEF_STUBS
2505 /* Note: arm_stub_a8_veneer_lwm marks the first a8_veneer type.  */
2506 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2507 };
2508 #undef DEF_STUB
2509
2510 typedef struct
2511 {
2512 const insn_sequence* template_sequence;
2513 int template_size;
2514 } stub_def;
2515
2516 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2517 static const stub_def stub_definitions[] =
2518 {
2519 {NULL, 0},
2520 DEF_STUBS
2521 };
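
/* An illustrative sketch of why the X-macro above works: DEF_STUBS is
   expanded once to build the enum and once to build stub_definitions,
   and the leading {NULL, 0} slot stands in for arm_stub_none, so a stub
   type can index the table directly.  The helper name is hypothetical.  */
#if 0
static const insn_sequence *
example_template_for (enum elf32_arm_stub_type stub_type, int *size)
{
  *size = stub_definitions[stub_type].template_size;
  return stub_definitions[stub_type].template_sequence;
}
#endif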
2522
2523 struct elf32_arm_stub_hash_entry
2524 {
2525 /* Base hash table entry structure. */
2526 struct bfd_hash_entry root;
2527
2528 /* The stub section. */
2529 asection *stub_sec;
2530
2531 /* Offset within stub_sec of the beginning of this stub. */
2532 bfd_vma stub_offset;
2533
2534 /* Given the symbol's value and its section we can determine its final
2535 value when building the stubs (so the stub knows where to jump). */
2536 bfd_vma target_value;
2537 asection *target_section;
2538
2539 /* Offset to apply to relocation referencing target_value. */
2540 bfd_vma target_addend;
2541
2542 /* The instruction which caused this stub to be generated (only valid for
2543 Cortex-A8 erratum workaround stubs at present). */
2544 unsigned long orig_insn;
2545
2546 /* The stub type. */
2547 enum elf32_arm_stub_type stub_type;
2548 /* Its encoding size in bytes. */
2549 int stub_size;
2550 /* Its template. */
2551 const insn_sequence *stub_template;
2552 /* The size of the template (number of entries). */
2553 int stub_template_size;
2554
2555 /* The symbol table entry, if any, that this was derived from. */
2556 struct elf32_arm_link_hash_entry *h;
2557
2558 /* Type of branch. */
2559 enum arm_st_branch_type branch_type;
2560
2561 /* Where this stub is being called from, or, in the case of combined
2562 stub sections, the first input section in the group. */
2563 asection *id_sec;
2564
2565 /* The name for the local symbol at the start of this stub.  The
2566 stub name in the hash table has to be unique; this one does not, so
2567 it can be friendlier.  */
2568 char *output_name;
2569 };
2570
2571 /* Used to build a map of a section. This is required for mixed-endian
2572 code/data. */
2573
2574 typedef struct elf32_elf_section_map
2575 {
2576 bfd_vma vma;
2577 char type;
2578 }
2579 elf32_arm_section_map;
2580
2581 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2582
2583 typedef enum
2584 {
2585 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2586 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2587 VFP11_ERRATUM_ARM_VENEER,
2588 VFP11_ERRATUM_THUMB_VENEER
2589 }
2590 elf32_vfp11_erratum_type;
2591
2592 typedef struct elf32_vfp11_erratum_list
2593 {
2594 struct elf32_vfp11_erratum_list *next;
2595 bfd_vma vma;
2596 union
2597 {
2598 struct
2599 {
2600 struct elf32_vfp11_erratum_list *veneer;
2601 unsigned int vfp_insn;
2602 } b;
2603 struct
2604 {
2605 struct elf32_vfp11_erratum_list *branch;
2606 unsigned int id;
2607 } v;
2608 } u;
2609 elf32_vfp11_erratum_type type;
2610 }
2611 elf32_vfp11_erratum_list;
2612
2613 typedef enum
2614 {
2615 DELETE_EXIDX_ENTRY,
2616 INSERT_EXIDX_CANTUNWIND_AT_END
2617 }
2618 arm_unwind_edit_type;
2619
2620 /* A (sorted) list of edits to apply to an unwind table. */
2621 typedef struct arm_unwind_table_edit
2622 {
2623 arm_unwind_edit_type type;
2624 /* Note: we sometimes want to insert an unwind entry corresponding to a
2625 section different from the one we're currently writing out, so record the
2626 (text) section this edit relates to here. */
2627 asection *linked_section;
2628 unsigned int index;
2629 struct arm_unwind_table_edit *next;
2630 }
2631 arm_unwind_table_edit;
2632
2633 typedef struct _arm_elf_section_data
2634 {
2635 /* Information about mapping symbols. */
2636 struct bfd_elf_section_data elf;
2637 unsigned int mapcount;
2638 unsigned int mapsize;
2639 elf32_arm_section_map *map;
2640 /* Information about CPU errata. */
2641 unsigned int erratumcount;
2642 elf32_vfp11_erratum_list *erratumlist;
2643 /* Information about unwind tables. */
2644 union
2645 {
2646 /* Unwind info attached to a text section. */
2647 struct
2648 {
2649 asection *arm_exidx_sec;
2650 } text;
2651
2652 /* Unwind info attached to an .ARM.exidx section. */
2653 struct
2654 {
2655 arm_unwind_table_edit *unwind_edit_list;
2656 arm_unwind_table_edit *unwind_edit_tail;
2657 } exidx;
2658 } u;
2659 }
2660 _arm_elf_section_data;
2661
2662 #define elf32_arm_section_data(sec) \
2663 ((_arm_elf_section_data *) elf_section_data (sec))
2664
2665 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2666 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2667 so may be created multiple times: whilst relaxing we use an array of these
2668 entries, which we can refresh easily, and then create stubs for each
2669 potentially erratum-triggering instruction once we've settled on a solution.  */
2670
2671 struct a8_erratum_fix
2672 {
2673 bfd *input_bfd;
2674 asection *section;
2675 bfd_vma offset;
2676 bfd_vma addend;
2677 unsigned long orig_insn;
2678 char *stub_name;
2679 enum elf32_arm_stub_type stub_type;
2680 enum arm_st_branch_type branch_type;
2681 };
2682
2683 /* A table of relocs applied to branches which might trigger Cortex-A8
2684 erratum. */
2685
2686 struct a8_erratum_reloc
2687 {
2688 bfd_vma from;
2689 bfd_vma destination;
2690 struct elf32_arm_link_hash_entry *hash;
2691 const char *sym_name;
2692 unsigned int r_type;
2693 enum arm_st_branch_type branch_type;
2694 bfd_boolean non_a8_stub;
2695 };
2696
2697 /* The size of the thread control block. */
2698 #define TCB_SIZE 8
2699
2700 /* ARM-specific information about a PLT entry, over and above the usual
2701 gotplt_union. */
2702 struct arm_plt_info
2703 {
2704 /* We reference count Thumb references to a PLT entry separately,
2705 so that we can emit the Thumb trampoline only if needed. */
2706 bfd_signed_vma thumb_refcount;
2707
2708 /* Some references from Thumb code may be eliminated by BL->BLX
2709 conversion, so record them separately. */
2710 bfd_signed_vma maybe_thumb_refcount;
2711
2712 /* How many of the recorded PLT accesses were from non-call relocations.
2713 This information is useful when deciding whether anything takes the
2714 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2715 non-call references to the function should resolve directly to the
2716 real runtime target. */
2717 unsigned int noncall_refcount;
2718
2719 /* Since PLT entries have variable size if the Thumb prologue is
2720 used, we need to record the index into .got.plt instead of
2721 recomputing it from the PLT offset. */
2722 bfd_signed_vma got_offset;
2723 };
2724
2725 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2726 struct arm_local_iplt_info
2727 {
2728 /* The information that is usually found in the generic ELF part of
2729 the hash table entry. */
2730 union gotplt_union root;
2731
2732 /* The information that is usually found in the ARM-specific part of
2733 the hash table entry. */
2734 struct arm_plt_info arm;
2735
2736 /* A list of all potential dynamic relocations against this symbol. */
2737 struct elf_dyn_relocs *dyn_relocs;
2738 };
2739
2740 struct elf_arm_obj_tdata
2741 {
2742 struct elf_obj_tdata root;
2743
2744 /* tls_type for each local got entry. */
2745 char *local_got_tls_type;
2746
2747 /* GOTPLT entries for TLS descriptors. */
2748 bfd_vma *local_tlsdesc_gotent;
2749
2750 /* Information for local symbols that need entries in .iplt. */
2751 struct arm_local_iplt_info **local_iplt;
2752
2753 /* Zero to warn when linking objects with incompatible enum sizes. */
2754 int no_enum_size_warning;
2755
2756 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2757 int no_wchar_size_warning;
2758 };
2759
2760 #define elf_arm_tdata(bfd) \
2761 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2762
2763 #define elf32_arm_local_got_tls_type(bfd) \
2764 (elf_arm_tdata (bfd)->local_got_tls_type)
2765
2766 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2767 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2768
2769 #define elf32_arm_local_iplt(bfd) \
2770 (elf_arm_tdata (bfd)->local_iplt)
2771
2772 #define is_arm_elf(bfd) \
2773 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2774 && elf_tdata (bfd) != NULL \
2775 && elf_object_id (bfd) == ARM_ELF_DATA)
2776
2777 static bfd_boolean
2778 elf32_arm_mkobject (bfd *abfd)
2779 {
2780 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2781 ARM_ELF_DATA);
2782 }
2783
2784 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2785
2786 /* Arm ELF linker hash entry. */
2787 struct elf32_arm_link_hash_entry
2788 {
2789 struct elf_link_hash_entry root;
2790
2791 /* Track dynamic relocs copied for this symbol. */
2792 struct elf_dyn_relocs *dyn_relocs;
2793
2794 /* ARM-specific PLT information. */
2795 struct arm_plt_info plt;
2796
2797 #define GOT_UNKNOWN 0
2798 #define GOT_NORMAL 1
2799 #define GOT_TLS_GD 2
2800 #define GOT_TLS_IE 4
2801 #define GOT_TLS_GDESC 8
2802 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2803 unsigned int tls_type : 8;
2804
2805 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2806 unsigned int is_iplt : 1;
2807
2808 unsigned int unused : 23;
2809
2810 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2811 starting at the end of the jump table. */
2812 bfd_vma tlsdesc_got;
2813
2814 /* The symbol marking the real symbol location for exported thumb
2815 symbols with Arm stubs. */
2816 struct elf_link_hash_entry *export_glue;
2817
2818 /* A pointer to the most recently used stub hash entry against this
2819 symbol. */
2820 struct elf32_arm_stub_hash_entry *stub_cache;
2821 };
2822
2823 /* Traverse an arm ELF linker hash table. */
2824 #define elf32_arm_link_hash_traverse(table, func, info) \
2825 (elf_link_hash_traverse \
2826 (&(table)->root, \
2827 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2828 (info)))
2829
2830 /* Get the ARM elf linker hash table from a link_info structure. */
2831 #define elf32_arm_hash_table(info) \
2832 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2833 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2834
2835 #define arm_stub_hash_lookup(table, string, create, copy) \
2836 ((struct elf32_arm_stub_hash_entry *) \
2837 bfd_hash_lookup ((table), (string), (create), (copy)))
2838
2839 /* Array to keep track of which stub sections have been created, and
2840 information on stub grouping. */
2841 struct map_stub
2842 {
2843 /* This is the section to which stubs in the group will be
2844 attached. */
2845 asection *link_sec;
2846 /* The stub section. */
2847 asection *stub_sec;
2848 };
2849
2850 #define elf32_arm_compute_jump_table_size(htab) \
2851 ((htab)->next_tls_desc_index * 4)
2852
2853 /* ARM ELF linker hash table. */
2854 struct elf32_arm_link_hash_table
2855 {
2856 /* The main hash table. */
2857 struct elf_link_hash_table root;
2858
2859 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2860 bfd_size_type thumb_glue_size;
2861
2862 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2863 bfd_size_type arm_glue_size;
2864
2865 /* The size in bytes of section containing the ARMv4 BX veneers. */
2866 bfd_size_type bx_glue_size;
2867
2868 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2869 veneer has been populated. */
2870 bfd_vma bx_glue_offset[15];
2871
2872 /* The size in bytes of the section containing glue for VFP11 erratum
2873 veneers. */
2874 bfd_size_type vfp11_erratum_glue_size;
2875
2876 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2877 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2878 elf32_arm_write_section(). */
2879 struct a8_erratum_fix *a8_erratum_fixes;
2880 unsigned int num_a8_erratum_fixes;
2881
2882 /* An arbitrary input BFD chosen to hold the glue sections. */
2883 bfd * bfd_of_glue_owner;
2884
2885 /* Nonzero to output a BE8 image. */
2886 int byteswap_code;
2887
2888 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2889 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2890 int target1_is_rel;
2891
2892 /* The relocation to use for R_ARM_TARGET2 relocations. */
2893 int target2_reloc;
2894
2895 /* 0 = Ignore R_ARM_V4BX.
2896 1 = Convert BX to MOV PC.
2897 2 = Generate v4 interworking stubs. */
2898 int fix_v4bx;
2899
2900 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2901 int fix_cortex_a8;
2902
2903 /* Whether we should fix the ARM1176 BLX immediate issue. */
2904 int fix_arm1176;
2905
2906 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2907 int use_blx;
2908
2909 /* What sort of code sequences we should look for which may trigger the
2910 VFP11 denorm erratum. */
2911 bfd_arm_vfp11_fix vfp11_fix;
2912
2913 /* Global counter for the number of fixes we have emitted. */
2914 int num_vfp11_fixes;
2915
2916 /* Nonzero to force PIC branch veneers. */
2917 int pic_veneer;
2918
2919 /* The number of bytes in the initial entry in the PLT. */
2920 bfd_size_type plt_header_size;
2921
2922 /* The number of bytes in the subsequent PLT entries. */
2923 bfd_size_type plt_entry_size;
2924
2925 /* True if the target system is VxWorks. */
2926 int vxworks_p;
2927
2928 /* True if the target system is Symbian OS. */
2929 int symbian_p;
2930
2931 /* True if the target system is Native Client. */
2932 int nacl_p;
2933
2934 /* True if the target uses REL relocations. */
2935 int use_rel;
2936
2937 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2938 bfd_vma next_tls_desc_index;
2939
2940 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2941 bfd_vma num_tls_desc;
2942
2943 /* Short-cuts to get to dynamic linker sections. */
2944 asection *sdynbss;
2945 asection *srelbss;
2946
2947 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2948 asection *srelplt2;
2949
2950 /* The offset into splt of the PLT entry for the TLS descriptor
2951 resolver. Special values are 0, if not necessary (or not found
2952 to be necessary yet), and -1 if needed but not determined
2953 yet. */
2954 bfd_vma dt_tlsdesc_plt;
2955
2956 /* The offset into sgot of the GOT entry used by the PLT entry
2957 above. */
2958 bfd_vma dt_tlsdesc_got;
2959
2960 /* Offset in .plt section of tls_arm_trampoline. */
2961 bfd_vma tls_trampoline;
2962
2963 /* Data for R_ARM_TLS_LDM32 relocations. */
2964 union
2965 {
2966 bfd_signed_vma refcount;
2967 bfd_vma offset;
2968 } tls_ldm_got;
2969
2970 /* Small local sym cache. */
2971 struct sym_cache sym_cache;
2972
2973 /* For convenience in allocate_dynrelocs. */
2974 bfd * obfd;
2975
2976 /* The amount of space used by the reserved portion of the sgotplt
2977 section, plus whatever space is used by the jump slots. */
2978 bfd_vma sgotplt_jump_table_size;
2979
2980 /* The stub hash table. */
2981 struct bfd_hash_table stub_hash_table;
2982
2983 /* Linker stub bfd. */
2984 bfd *stub_bfd;
2985
2986 /* Linker call-backs. */
2987 asection * (*add_stub_section) (const char *, asection *);
2988 void (*layout_sections_again) (void);
2989
2990 /* Array to keep track of which stub sections have been created, and
2991 information on stub grouping. */
2992 struct map_stub *stub_group;
2993
2994 /* Number of elements in stub_group. */
2995 int top_id;
2996
2997 /* Assorted information used by elf32_arm_size_stubs. */
2998 unsigned int bfd_count;
2999 int top_index;
3000 asection **input_list;
3001 };
3002
3003 /* Create an entry in an ARM ELF linker hash table. */
3004
3005 static struct bfd_hash_entry *
3006 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3007 struct bfd_hash_table * table,
3008 const char * string)
3009 {
3010 struct elf32_arm_link_hash_entry * ret =
3011 (struct elf32_arm_link_hash_entry *) entry;
3012
3013 /* Allocate the structure if it has not already been allocated by a
3014 subclass. */
3015 if (ret == NULL)
3016 ret = (struct elf32_arm_link_hash_entry *)
3017 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3018 if (ret == NULL)
3019 return (struct bfd_hash_entry *) ret;
3020
3021 /* Call the allocation method of the superclass. */
3022 ret = ((struct elf32_arm_link_hash_entry *)
3023 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3024 table, string));
3025 if (ret != NULL)
3026 {
3027 ret->dyn_relocs = NULL;
3028 ret->tls_type = GOT_UNKNOWN;
3029 ret->tlsdesc_got = (bfd_vma) -1;
3030 ret->plt.thumb_refcount = 0;
3031 ret->plt.maybe_thumb_refcount = 0;
3032 ret->plt.noncall_refcount = 0;
3033 ret->plt.got_offset = -1;
3034 ret->is_iplt = FALSE;
3035 ret->export_glue = NULL;
3036
3037 ret->stub_cache = NULL;
3038 }
3039
3040 return (struct bfd_hash_entry *) ret;
3041 }
3042
3043 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3044 symbols. */
3045
3046 static bfd_boolean
3047 elf32_arm_allocate_local_sym_info (bfd *abfd)
3048 {
3049 if (elf_local_got_refcounts (abfd) == NULL)
3050 {
3051 bfd_size_type num_syms;
3052 bfd_size_type size;
3053 char *data;
3054
3055 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3056 size = num_syms * (sizeof (bfd_signed_vma)
3057 + sizeof (struct arm_local_iplt_info *)
3058 + sizeof (bfd_vma)
3059 + sizeof (char));
3060 data = bfd_zalloc (abfd, size);
3061 if (data == NULL)
3062 return FALSE;
3063
3064 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3065 data += num_syms * sizeof (bfd_signed_vma);
3066
3067 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3068 data += num_syms * sizeof (struct arm_local_iplt_info *);
3069
3070 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3071 data += num_syms * sizeof (bfd_vma);
3072
3073 elf32_arm_local_got_tls_type (abfd) = data;
3074 }
3075 return TRUE;
3076 }
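
/* An illustrative sketch (not the allocator above) of how the single
   bfd_zalloc'd block is sliced into four parallel per-symbol arrays, in
   the same order as above.  The helper name is hypothetical.  */
#if 0
static void
example_slice_local_sym_info (char *data, bfd_size_type num_syms)
{
  bfd_signed_vma *got_refcounts = (bfd_signed_vma *) data;
  struct arm_local_iplt_info **iplt_info
    = (struct arm_local_iplt_info **)
      (data + num_syms * sizeof (bfd_signed_vma));
  bfd_vma *tlsdesc_gotents = (bfd_vma *) (iplt_info + num_syms);
  char *got_tls_types = (char *) (tlsdesc_gotents + num_syms);

  (void) got_refcounts;
  (void) got_tls_types;
}
#endif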
3077
3078 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3079 to input bfd ABFD. Create the information if it doesn't already exist.
3080 Return null if an allocation fails. */
3081
3082 static struct arm_local_iplt_info *
3083 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3084 {
3085 struct arm_local_iplt_info **ptr;
3086
3087 if (!elf32_arm_allocate_local_sym_info (abfd))
3088 return NULL;
3089
3090 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3091 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3092 if (*ptr == NULL)
3093 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3094 return *ptr;
3095 }
3096
3097 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3098 in ABFD's symbol table. If the symbol is global, H points to its
3099 hash table entry, otherwise H is null.
3100
3101 Return true if the symbol does have PLT information. When returning
3102 true, point *ROOT_PLT at the target-independent reference count/offset
3103 union and *ARM_PLT at the ARM-specific information. */
3104
3105 static bfd_boolean
3106 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3107 unsigned long r_symndx, union gotplt_union **root_plt,
3108 struct arm_plt_info **arm_plt)
3109 {
3110 struct arm_local_iplt_info *local_iplt;
3111
3112 if (h != NULL)
3113 {
3114 *root_plt = &h->root.plt;
3115 *arm_plt = &h->plt;
3116 return TRUE;
3117 }
3118
3119 if (elf32_arm_local_iplt (abfd) == NULL)
3120 return FALSE;
3121
3122 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3123 if (local_iplt == NULL)
3124 return FALSE;
3125
3126 *root_plt = &local_iplt->root;
3127 *arm_plt = &local_iplt->arm;
3128 return TRUE;
3129 }
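
/* An illustrative sketch (not an actual caller) of how relocation
   processing obtains PLT bookkeeping for either a global symbol (H
   non-null) or a local symbol (H null, R_SYMNDX used).  The helper name
   is hypothetical.  */
#if 0
static void
example_query_plt (bfd *abfd, struct elf32_arm_link_hash_entry *h,
		   unsigned long r_symndx)
{
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;

  if (elf32_arm_get_plt_info (abfd, h, r_symndx, &root_plt, &arm_plt))
    {
      /* root_plt holds the generic refcount/offset union and arm_plt
	 the ARM-specific reference counts.  */
    }
}
#endif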
3130
3131 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3132 before it. */
3133
3134 static bfd_boolean
3135 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3136 struct arm_plt_info *arm_plt)
3137 {
3138 struct elf32_arm_link_hash_table *htab;
3139
3140 htab = elf32_arm_hash_table (info);
3141 return (arm_plt->thumb_refcount != 0
3142 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3143 }
3144
3145 /* Return a pointer to the head of the dynamic reloc list that should
3146 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3147 ABFD's symbol table. Return null if an error occurs. */
3148
3149 static struct elf_dyn_relocs **
3150 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3151 Elf_Internal_Sym *isym)
3152 {
3153 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3154 {
3155 struct arm_local_iplt_info *local_iplt;
3156
3157 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3158 if (local_iplt == NULL)
3159 return NULL;
3160 return &local_iplt->dyn_relocs;
3161 }
3162 else
3163 {
3164 /* Track dynamic relocs needed for local syms too.
3165 We really need local syms available to do this
3166 easily. Oh well. */
3167 asection *s;
3168 void *vpp;
3169
3170 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3171 if (s == NULL)
3172 abort ();
3173
3174 vpp = &elf_section_data (s)->local_dynrel;
3175 return (struct elf_dyn_relocs **) vpp;
3176 }
3177 }
3178
3179 /* Initialize an entry in the stub hash table. */
3180
3181 static struct bfd_hash_entry *
3182 stub_hash_newfunc (struct bfd_hash_entry *entry,
3183 struct bfd_hash_table *table,
3184 const char *string)
3185 {
3186 /* Allocate the structure if it has not already been allocated by a
3187 subclass. */
3188 if (entry == NULL)
3189 {
3190 entry = (struct bfd_hash_entry *)
3191 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3192 if (entry == NULL)
3193 return entry;
3194 }
3195
3196 /* Call the allocation method of the superclass. */
3197 entry = bfd_hash_newfunc (entry, table, string);
3198 if (entry != NULL)
3199 {
3200 struct elf32_arm_stub_hash_entry *eh;
3201
3202 /* Initialize the local fields. */
3203 eh = (struct elf32_arm_stub_hash_entry *) entry;
3204 eh->stub_sec = NULL;
3205 eh->stub_offset = 0;
3206 eh->target_value = 0;
3207 eh->target_section = NULL;
3208 eh->target_addend = 0;
3209 eh->orig_insn = 0;
3210 eh->stub_type = arm_stub_none;
3211 eh->stub_size = 0;
3212 eh->stub_template = NULL;
3213 eh->stub_template_size = 0;
3214 eh->h = NULL;
3215 eh->id_sec = NULL;
3216 eh->output_name = NULL;
3217 }
3218
3219 return entry;
3220 }
3221
3222 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3223 shortcuts to them in our hash table. */
3224
3225 static bfd_boolean
3226 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3227 {
3228 struct elf32_arm_link_hash_table *htab;
3229
3230 htab = elf32_arm_hash_table (info);
3231 if (htab == NULL)
3232 return FALSE;
3233
3234 /* BPABI objects never have a GOT, or associated sections. */
3235 if (htab->symbian_p)
3236 return TRUE;
3237
3238 if (! _bfd_elf_create_got_section (dynobj, info))
3239 return FALSE;
3240
3241 return TRUE;
3242 }
3243
3244 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3245
3246 static bfd_boolean
3247 create_ifunc_sections (struct bfd_link_info *info)
3248 {
3249 struct elf32_arm_link_hash_table *htab;
3250 const struct elf_backend_data *bed;
3251 bfd *dynobj;
3252 asection *s;
3253 flagword flags;
3254
3255 htab = elf32_arm_hash_table (info);
3256 dynobj = htab->root.dynobj;
3257 bed = get_elf_backend_data (dynobj);
3258 flags = bed->dynamic_sec_flags;
3259
3260 if (htab->root.iplt == NULL)
3261 {
3262 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3263 flags | SEC_READONLY | SEC_CODE);
3264 if (s == NULL
3265 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3266 return FALSE;
3267 htab->root.iplt = s;
3268 }
3269
3270 if (htab->root.irelplt == NULL)
3271 {
3272 s = bfd_make_section_anyway_with_flags (dynobj,
3273 RELOC_SECTION (htab, ".iplt"),
3274 flags | SEC_READONLY);
3275 if (s == NULL
3276 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3277 return FALSE;
3278 htab->root.irelplt = s;
3279 }
3280
3281 if (htab->root.igotplt == NULL)
3282 {
3283 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3284 if (s == NULL
3285 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3286 return FALSE;
3287 htab->root.igotplt = s;
3288 }
3289 return TRUE;
3290 }
3291
3292 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3293 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3294 hash table. */
3295
3296 static bfd_boolean
3297 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3298 {
3299 struct elf32_arm_link_hash_table *htab;
3300
3301 htab = elf32_arm_hash_table (info);
3302 if (htab == NULL)
3303 return FALSE;
3304
3305 if (!htab->root.sgot && !create_got_section (dynobj, info))
3306 return FALSE;
3307
3308 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3309 return FALSE;
3310
3311 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3312 if (!info->shared)
3313 htab->srelbss = bfd_get_linker_section (dynobj,
3314 RELOC_SECTION (htab, ".bss"));
3315
3316 if (htab->vxworks_p)
3317 {
3318 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3319 return FALSE;
3320
3321 if (info->shared)
3322 {
3323 htab->plt_header_size = 0;
3324 htab->plt_entry_size
3325 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3326 }
3327 else
3328 {
3329 htab->plt_header_size
3330 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3331 htab->plt_entry_size
3332 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3333 }
3334 }
3335
3336 if (!htab->root.splt
3337 || !htab->root.srelplt
3338 || !htab->sdynbss
3339 || (!info->shared && !htab->srelbss))
3340 abort ();
3341
3342 return TRUE;
3343 }
3344
3345 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3346
3347 static void
3348 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3349 struct elf_link_hash_entry *dir,
3350 struct elf_link_hash_entry *ind)
3351 {
3352 struct elf32_arm_link_hash_entry *edir, *eind;
3353
3354 edir = (struct elf32_arm_link_hash_entry *) dir;
3355 eind = (struct elf32_arm_link_hash_entry *) ind;
3356
3357 if (eind->dyn_relocs != NULL)
3358 {
3359 if (edir->dyn_relocs != NULL)
3360 {
3361 struct elf_dyn_relocs **pp;
3362 struct elf_dyn_relocs *p;
3363
3364 /* Add reloc counts against the indirect sym to the direct sym
3365 list. Merge any entries against the same section. */
3366 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3367 {
3368 struct elf_dyn_relocs *q;
3369
3370 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3371 if (q->sec == p->sec)
3372 {
3373 q->pc_count += p->pc_count;
3374 q->count += p->count;
3375 *pp = p->next;
3376 break;
3377 }
3378 if (q == NULL)
3379 pp = &p->next;
3380 }
3381 *pp = edir->dyn_relocs;
3382 }
3383
3384 edir->dyn_relocs = eind->dyn_relocs;
3385 eind->dyn_relocs = NULL;
3386 }
3387
3388 if (ind->root.type == bfd_link_hash_indirect)
3389 {
3390 /* Copy over PLT info. */
3391 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3392 eind->plt.thumb_refcount = 0;
3393 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3394 eind->plt.maybe_thumb_refcount = 0;
3395 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3396 eind->plt.noncall_refcount = 0;
3397
3398 /* We should only allocate a function to .iplt once the final
3399 symbol information is known. */
3400 BFD_ASSERT (!eind->is_iplt);
3401
3402 if (dir->got.refcount <= 0)
3403 {
3404 edir->tls_type = eind->tls_type;
3405 eind->tls_type = GOT_UNKNOWN;
3406 }
3407 }
3408
3409 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3410 }
3411
3412 /* Create an ARM elf linker hash table. */
3413
3414 static struct bfd_link_hash_table *
3415 elf32_arm_link_hash_table_create (bfd *abfd)
3416 {
3417 struct elf32_arm_link_hash_table *ret;
3418 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3419
3420 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3421 if (ret == NULL)
3422 return NULL;
3423
3424 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3425 elf32_arm_link_hash_newfunc,
3426 sizeof (struct elf32_arm_link_hash_entry),
3427 ARM_ELF_DATA))
3428 {
3429 free (ret);
3430 return NULL;
3431 }
3432
3433 ret->sdynbss = NULL;
3434 ret->srelbss = NULL;
3435 ret->srelplt2 = NULL;
3436 ret->dt_tlsdesc_plt = 0;
3437 ret->dt_tlsdesc_got = 0;
3438 ret->tls_trampoline = 0;
3439 ret->next_tls_desc_index = 0;
3440 ret->num_tls_desc = 0;
3441 ret->thumb_glue_size = 0;
3442 ret->arm_glue_size = 0;
3443 ret->bx_glue_size = 0;
3444 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3445 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3446 ret->vfp11_erratum_glue_size = 0;
3447 ret->num_vfp11_fixes = 0;
3448 ret->fix_cortex_a8 = 0;
3449 ret->fix_arm1176 = 0;
3450 ret->bfd_of_glue_owner = NULL;
3451 ret->byteswap_code = 0;
3452 ret->target1_is_rel = 0;
3453 ret->target2_reloc = R_ARM_NONE;
3454 #ifdef FOUR_WORD_PLT
3455 ret->plt_header_size = 16;
3456 ret->plt_entry_size = 16;
3457 #else
3458 ret->plt_header_size = 20;
3459 ret->plt_entry_size = 12;
3460 #endif
3461 ret->fix_v4bx = 0;
3462 ret->use_blx = 0;
3463 ret->vxworks_p = 0;
3464 ret->symbian_p = 0;
3465 ret->nacl_p = 0;
3466 ret->use_rel = 1;
3467 ret->sym_cache.abfd = NULL;
3468 ret->obfd = abfd;
3469 ret->tls_ldm_got.refcount = 0;
3470 ret->stub_bfd = NULL;
3471 ret->add_stub_section = NULL;
3472 ret->layout_sections_again = NULL;
3473 ret->stub_group = NULL;
3474 ret->top_id = 0;
3475 ret->bfd_count = 0;
3476 ret->top_index = 0;
3477 ret->input_list = NULL;
3478
3479 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3480 sizeof (struct elf32_arm_stub_hash_entry)))
3481 {
3482 free (ret);
3483 return NULL;
3484 }
3485
3486 return &ret->root.root;
3487 }
3488
3489 /* Free the derived linker hash table. */
3490
3491 static void
3492 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3493 {
3494 struct elf32_arm_link_hash_table *ret
3495 = (struct elf32_arm_link_hash_table *) hash;
3496
3497 bfd_hash_table_free (&ret->stub_hash_table);
3498 _bfd_generic_link_hash_table_free (hash);
3499 }
3500
3501 /* Determine if we're dealing with a Thumb-only architecture. */
3502
3503 static bfd_boolean
3504 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3505 {
3506 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3507 Tag_CPU_arch);
3508 int profile;
3509
3510 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3511 return TRUE;
3512
3513 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3514 return FALSE;
3515
3516 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3517 Tag_CPU_arch_profile);
3518
3519 return profile == 'M';
3520 }
3521
3522 /* Determine if we're dealing with a Thumb-2 object. */
3523
3524 static bfd_boolean
3525 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3526 {
3527 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3528 Tag_CPU_arch);
3529 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3530 }
3531
3532 /* Determine what kind of NOPs are available. */
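/* For reference: the architected hint NOP encodings are 0xe320f000 for
   ARM and 0xf3af8000 for 32-bit Thumb-2 (NOP.W); both appear with ARMv6T2
   (the ARM form is also present in ARMv6K). Where they are unavailable,
   a neutral instruction such as "mov r0, r0" has to serve as a NOP
   instead. */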
3533
3534 static bfd_boolean
3535 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3536 {
3537 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3538 Tag_CPU_arch);
3539 return arch == TAG_CPU_ARCH_V6T2
3540 || arch == TAG_CPU_ARCH_V6K
3541 || arch == TAG_CPU_ARCH_V7
3542 || arch == TAG_CPU_ARCH_V7E_M;
3543 }
3544
3545 static bfd_boolean
3546 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3547 {
3548 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3549 Tag_CPU_arch);
3550 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3551 || arch == TAG_CPU_ARCH_V7E_M);
3552 }
3553
3554 static bfd_boolean
3555 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3556 {
3557 switch (stub_type)
3558 {
3559 case arm_stub_long_branch_thumb_only:
3560 case arm_stub_long_branch_v4t_thumb_arm:
3561 case arm_stub_short_branch_v4t_thumb_arm:
3562 case arm_stub_long_branch_v4t_thumb_arm_pic:
3563 case arm_stub_long_branch_v4t_thumb_tls_pic:
3564 case arm_stub_long_branch_thumb_only_pic:
3565 return TRUE;
3566 case arm_stub_none:
3567 BFD_FAIL ();
3568 return FALSE;
3569 break;
3570 default:
3571 return FALSE;
3572 }
3573 }
3574
3575 /* Determine the type of stub needed, if any, for a call. */
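/* For orientation, the architectural branch ranges behind the
   *_MAX_FWD_BRANCH_OFFSET / *_MAX_BWD_BRANCH_OFFSET limits used below are
   approximately +/-32MB for ARM B/BL, +/-4MB for Thumb-1 BL and
   +/-16MB for Thumb-2 B.W/BL; destinations beyond these need a
   long-branch stub. */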
3576
3577 static enum elf32_arm_stub_type
3578 arm_type_of_stub (struct bfd_link_info *info,
3579 asection *input_sec,
3580 const Elf_Internal_Rela *rel,
3581 unsigned char st_type,
3582 enum arm_st_branch_type *actual_branch_type,
3583 struct elf32_arm_link_hash_entry *hash,
3584 bfd_vma destination,
3585 asection *sym_sec,
3586 bfd *input_bfd,
3587 const char *name)
3588 {
3589 bfd_vma location;
3590 bfd_signed_vma branch_offset;
3591 unsigned int r_type;
3592 struct elf32_arm_link_hash_table * globals;
3593 int thumb2;
3594 int thumb_only;
3595 enum elf32_arm_stub_type stub_type = arm_stub_none;
3596 int use_plt = 0;
3597 enum arm_st_branch_type branch_type = *actual_branch_type;
3598 union gotplt_union *root_plt;
3599 struct arm_plt_info *arm_plt;
3600
3601 if (branch_type == ST_BRANCH_LONG)
3602 return stub_type;
3603
3604 globals = elf32_arm_hash_table (info);
3605 if (globals == NULL)
3606 return stub_type;
3607
3608 thumb_only = using_thumb_only (globals);
3609
3610 thumb2 = using_thumb2 (globals);
3611
3612 /* Determine where the call point is. */
3613 location = (input_sec->output_offset
3614 + input_sec->output_section->vma
3615 + rel->r_offset);
3616
3617 r_type = ELF32_R_TYPE (rel->r_info);
3618
3619 /* For TLS call relocs, it is the caller's responsibility to provide
3620 the address of the appropriate trampoline. */
3621 if (r_type != R_ARM_TLS_CALL
3622 && r_type != R_ARM_THM_TLS_CALL
3623 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3624 &root_plt, &arm_plt)
3625 && root_plt->offset != (bfd_vma) -1)
3626 {
3627 asection *splt;
3628
3629 if (hash == NULL || hash->is_iplt)
3630 splt = globals->root.iplt;
3631 else
3632 splt = globals->root.splt;
3633 if (splt != NULL)
3634 {
3635 use_plt = 1;
3636
3637 /* Note when dealing with PLT entries: the main PLT stub is in
3638 ARM mode, so if the branch is in Thumb mode, another
3639 Thumb->ARM stub will be inserted later just before the ARM
3640 PLT stub. We don't take this extra distance into account
3641 here, because if a long branch stub is needed, we'll add a
3642 Thumb->ARM one and branch directly to the ARM PLT entry,
3643 which avoids spreading offset corrections across several
3644 places. */
3645
3646 destination = (splt->output_section->vma
3647 + splt->output_offset
3648 + root_plt->offset);
3649 st_type = STT_FUNC;
3650 branch_type = ST_BRANCH_TO_ARM;
3651 }
3652 }
3653 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3654 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3655
3656 branch_offset = (bfd_signed_vma)(destination - location);
3657
3658 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3659 || r_type == R_ARM_THM_TLS_CALL)
3660 {
3661 /* Handle cases where:
3662 - this call goes too far (different Thumb/Thumb2 max
3663 distance)
3664 - it's a Thumb->Arm call and blx is not available, or it's a
3665 Thumb->Arm branch (not bl). A stub is needed in this case,
3666 but only if this call is not through a PLT entry. Indeed,
3667 PLT stubs handle mode switching already.
3668 */
3669 if ((!thumb2
3670 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3671 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3672 || (thumb2
3673 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3674 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3675 || (branch_type == ST_BRANCH_TO_ARM
3676 && (((r_type == R_ARM_THM_CALL
3677 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3678 || (r_type == R_ARM_THM_JUMP24))
3679 && !use_plt))
3680 {
3681 if (branch_type == ST_BRANCH_TO_THUMB)
3682 {
3683 /* Thumb to thumb. */
3684 if (!thumb_only)
3685 {
3686 stub_type = (info->shared | globals->pic_veneer)
3687 /* PIC stubs. */
3688 ? ((globals->use_blx
3689 && (r_type == R_ARM_THM_CALL))
3690 /* V5T and above. Stub starts with ARM code, so
3691 we must be able to switch mode before
3692 reaching it, which is only possible for 'bl'
3693 (ie R_ARM_THM_CALL relocation). */
3694 ? arm_stub_long_branch_any_thumb_pic
3695 /* On V4T, use Thumb code only. */
3696 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3697
3698 /* non-PIC stubs. */
3699 : ((globals->use_blx
3700 && (r_type == R_ARM_THM_CALL))
3701 /* V5T and above. */
3702 ? arm_stub_long_branch_any_any
3703 /* V4T. */
3704 : arm_stub_long_branch_v4t_thumb_thumb);
3705 }
3706 else
3707 {
3708 stub_type = (info->shared | globals->pic_veneer)
3709 /* PIC stub. */
3710 ? arm_stub_long_branch_thumb_only_pic
3711 /* non-PIC stub. */
3712 : arm_stub_long_branch_thumb_only;
3713 }
3714 }
3715 else
3716 {
3717 /* Thumb to arm. */
3718 if (sym_sec != NULL
3719 && sym_sec->owner != NULL
3720 && !INTERWORK_FLAG (sym_sec->owner))
3721 {
3722 (*_bfd_error_handler)
3723 (_("%B(%s): warning: interworking not enabled.\n"
3724 " first occurrence: %B: Thumb call to ARM"),
3725 sym_sec->owner, input_bfd, name);
3726 }
3727
3728 stub_type =
3729 (info->shared | globals->pic_veneer)
3730 /* PIC stubs. */
3731 ? (r_type == R_ARM_THM_TLS_CALL
3732 /* TLS PIC stubs */
3733 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3734 : arm_stub_long_branch_v4t_thumb_tls_pic)
3735 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3736 /* V5T PIC and above. */
3737 ? arm_stub_long_branch_any_arm_pic
3738 /* V4T PIC stub. */
3739 : arm_stub_long_branch_v4t_thumb_arm_pic))
3740
3741 /* non-PIC stubs. */
3742 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3743 /* V5T and above. */
3744 ? arm_stub_long_branch_any_any
3745 /* V4T. */
3746 : arm_stub_long_branch_v4t_thumb_arm);
3747
3748 /* Handle v4t short branches. */
3749 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3750 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3751 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3752 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3753 }
3754 }
3755 }
3756 else if (r_type == R_ARM_CALL
3757 || r_type == R_ARM_JUMP24
3758 || r_type == R_ARM_PLT32
3759 || r_type == R_ARM_TLS_CALL)
3760 {
3761 if (branch_type == ST_BRANCH_TO_THUMB)
3762 {
3763 /* Arm to thumb. */
3764
3765 if (sym_sec != NULL
3766 && sym_sec->owner != NULL
3767 && !INTERWORK_FLAG (sym_sec->owner))
3768 {
3769 (*_bfd_error_handler)
3770 (_("%B(%s): warning: interworking not enabled.\n"
3771 " first occurrence: %B: ARM call to Thumb"),
3772 sym_sec->owner, input_bfd, name);
3773 }
3774
3775 /* We have an extra 2 bytes of reach because of
3776 the mode change (bit 24 (H) of the BLX encoding). */
3777 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3778 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3779 || (r_type == R_ARM_CALL && !globals->use_blx)
3780 || (r_type == R_ARM_JUMP24)
3781 || (r_type == R_ARM_PLT32))
3782 {
3783 stub_type = (info->shared | globals->pic_veneer)
3784 /* PIC stubs. */
3785 ? ((globals->use_blx)
3786 /* V5T and above. */
3787 ? arm_stub_long_branch_any_thumb_pic
3788 /* V4T stub. */
3789 : arm_stub_long_branch_v4t_arm_thumb_pic)
3790
3791 /* non-PIC stubs. */
3792 : ((globals->use_blx)
3793 /* V5T and above. */
3794 ? arm_stub_long_branch_any_any
3795 /* V4T. */
3796 : arm_stub_long_branch_v4t_arm_thumb);
3797 }
3798 }
3799 else
3800 {
3801 /* Arm to arm. */
3802 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3803 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3804 {
3805 stub_type =
3806 (info->shared | globals->pic_veneer)
3807 /* PIC stubs. */
3808 ? (r_type == R_ARM_TLS_CALL
3809 /* TLS PIC Stub */
3810 ? arm_stub_long_branch_any_tls_pic
3811 : arm_stub_long_branch_any_arm_pic)
3812 /* non-PIC stubs. */
3813 : arm_stub_long_branch_any_any;
3814 }
3815 }
3816 }
3817
3818 /* If a stub is needed, record the actual destination type. */
3819 if (stub_type != arm_stub_none)
3820 *actual_branch_type = branch_type;
3821
3822 return stub_type;
3823 }
3824
3825 /* Build a name for an entry in the stub hash table. */
3826
3827 static char *
3828 elf32_arm_stub_name (const asection *input_section,
3829 const asection *sym_sec,
3830 const struct elf32_arm_link_hash_entry *hash,
3831 const Elf_Internal_Rela *rel,
3832 enum elf32_arm_stub_type stub_type)
3833 {
3834 char *stub_name;
3835 bfd_size_type len;
3836
3837 if (hash)
3838 {
3839 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3840 stub_name = (char *) bfd_malloc (len);
3841 if (stub_name != NULL)
3842 sprintf (stub_name, "%08x_%s+%x_%d",
3843 input_section->id & 0xffffffff,
3844 hash->root.root.root.string,
3845 (int) rel->r_addend & 0xffffffff,
3846 (int) stub_type);
3847 }
3848 else
3849 {
3850 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3851 stub_name = (char *) bfd_malloc (len);
3852 if (stub_name != NULL)
3853 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3854 input_section->id & 0xffffffff,
3855 sym_sec->id & 0xffffffff,
3856 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3857 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3858 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3859 (int) rel->r_addend & 0xffffffff,
3860 (int) stub_type);
3861 }
3862
3863 return stub_name;
3864 }
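/* For example (illustrative values only): a stub for a call to "printf"
   from a section whose id is 0x2a, with a zero addend and stub type 3,
   is named "0000002a_printf+0_3". The local-symbol form encodes the
   symbol's section id and symbol index in place of the name. */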
3865
3866 /* Look up an entry in the stub hash. Stub entries are cached because
3867 creating the stub name takes a bit of time. */
3868
3869 static struct elf32_arm_stub_hash_entry *
3870 elf32_arm_get_stub_entry (const asection *input_section,
3871 const asection *sym_sec,
3872 struct elf_link_hash_entry *hash,
3873 const Elf_Internal_Rela *rel,
3874 struct elf32_arm_link_hash_table *htab,
3875 enum elf32_arm_stub_type stub_type)
3876 {
3877 struct elf32_arm_stub_hash_entry *stub_entry;
3878 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3879 const asection *id_sec;
3880
3881 if ((input_section->flags & SEC_CODE) == 0)
3882 return NULL;
3883
3884 /* If this input section is part of a group of sections sharing one
3885 stub section, then use the id of the first section in the group.
3886 Stub names need to include a section id, as there may well be
3887 more than one stub used to reach say, printf, and we need to
3888 distinguish between them. */
3889 id_sec = htab->stub_group[input_section->id].link_sec;
3890
3891 if (h != NULL && h->stub_cache != NULL
3892 && h->stub_cache->h == h
3893 && h->stub_cache->id_sec == id_sec
3894 && h->stub_cache->stub_type == stub_type)
3895 {
3896 stub_entry = h->stub_cache;
3897 }
3898 else
3899 {
3900 char *stub_name;
3901
3902 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3903 if (stub_name == NULL)
3904 return NULL;
3905
3906 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3907 stub_name, FALSE, FALSE);
3908 if (h != NULL)
3909 h->stub_cache = stub_entry;
3910
3911 free (stub_name);
3912 }
3913
3914 return stub_entry;
3915 }
3916
3917 /* Find or create a stub section. Returns a pointer to the stub section, and
3918 the section to which the stub section will be attached (in *LINK_SEC_P).
3919 LINK_SEC_P may be NULL. */
3920
3921 static asection *
3922 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3923 struct elf32_arm_link_hash_table *htab)
3924 {
3925 asection *link_sec;
3926 asection *stub_sec;
3927
3928 link_sec = htab->stub_group[section->id].link_sec;
3929 BFD_ASSERT (link_sec != NULL);
3930 stub_sec = htab->stub_group[section->id].stub_sec;
3931
3932 if (stub_sec == NULL)
3933 {
3934 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3935 if (stub_sec == NULL)
3936 {
3937 size_t namelen;
3938 bfd_size_type len;
3939 char *s_name;
3940
3941 namelen = strlen (link_sec->name);
3942 len = namelen + sizeof (STUB_SUFFIX);
3943 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3944 if (s_name == NULL)
3945 return NULL;
3946
3947 memcpy (s_name, link_sec->name, namelen);
3948 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3949 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3950 if (stub_sec == NULL)
3951 return NULL;
3952 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3953 }
3954 htab->stub_group[section->id].stub_sec = stub_sec;
3955 }
3956
3957 if (link_sec_p)
3958 *link_sec_p = link_sec;
3959
3960 return stub_sec;
3961 }
3962
3963 /* Add a new stub entry to the stub hash. Not all fields of the new
3964 stub entry are initialised. */
3965
3966 static struct elf32_arm_stub_hash_entry *
3967 elf32_arm_add_stub (const char *stub_name,
3968 asection *section,
3969 struct elf32_arm_link_hash_table *htab)
3970 {
3971 asection *link_sec;
3972 asection *stub_sec;
3973 struct elf32_arm_stub_hash_entry *stub_entry;
3974
3975 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3976 if (stub_sec == NULL)
3977 return NULL;
3978
3979 /* Enter this entry into the linker stub hash table. */
3980 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3981 TRUE, FALSE);
3982 if (stub_entry == NULL)
3983 {
3984 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
3985 section->owner,
3986 stub_name);
3987 return NULL;
3988 }
3989
3990 stub_entry->stub_sec = stub_sec;
3991 stub_entry->stub_offset = 0;
3992 stub_entry->id_sec = link_sec;
3993
3994 return stub_entry;
3995 }
3996
3997 /* Store an Arm insn into an output section not processed by
3998 elf32_arm_write_section. */
3999
4000 static void
4001 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4002 bfd * output_bfd, bfd_vma val, void * ptr)
4003 {
4004 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4005 bfd_putl32 (val, ptr);
4006 else
4007 bfd_putb32 (val, ptr);
4008 }
4009
4010 /* Store a 16-bit Thumb insn into an output section not processed by
4011 elf32_arm_write_section. */
4012
4013 static void
4014 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4015 bfd * output_bfd, bfd_vma val, void * ptr)
4016 {
4017 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4018 bfd_putl16 (val, ptr);
4019 else
4020 bfd_putb16 (val, ptr);
4021 }
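/* In both helpers above the instruction is stored in the output BFD's
   byte order when byteswap_code is clear; when byteswap_code is set the
   opposite order is used, so that code ends up byte-reversed relative to
   data in the output image. */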
4022
4023 /* If it's possible to change R_TYPE to a more efficient access
4024 model, return the new reloc type. */
4025
4026 static unsigned
4027 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4028 struct elf_link_hash_entry *h)
4029 {
4030 int is_local = (h == NULL);
4031
4032 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4033 return r_type;
4034
4035 /* We do not support relaxations for Old TLS models. */
4036 switch (r_type)
4037 {
4038 case R_ARM_TLS_GOTDESC:
4039 case R_ARM_TLS_CALL:
4040 case R_ARM_THM_TLS_CALL:
4041 case R_ARM_TLS_DESCSEQ:
4042 case R_ARM_THM_TLS_DESCSEQ:
4043 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4044 }
4045
4046 return r_type;
4047 }
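/* That is: when not linking a shared object, a TLS descriptor style
   access (GOTDESC, (THM_)TLS_CALL, (THM_)TLS_DESCSEQ) is rewritten to the
   local-exec model (R_ARM_TLS_LE32) if the symbol is local, and to the
   initial-exec model (R_ARM_TLS_IE32) otherwise. */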
4048
4049 static bfd_reloc_status_type elf32_arm_final_link_relocate
4050 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4051 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4052 const char *, unsigned char, enum arm_st_branch_type,
4053 struct elf_link_hash_entry *, bfd_boolean *, char **);
4054
4055 static unsigned int
4056 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4057 {
4058 switch (stub_type)
4059 {
4060 case arm_stub_a8_veneer_b_cond:
4061 case arm_stub_a8_veneer_b:
4062 case arm_stub_a8_veneer_bl:
4063 return 2;
4064
4065 case arm_stub_long_branch_any_any:
4066 case arm_stub_long_branch_v4t_arm_thumb:
4067 case arm_stub_long_branch_thumb_only:
4068 case arm_stub_long_branch_v4t_thumb_thumb:
4069 case arm_stub_long_branch_v4t_thumb_arm:
4070 case arm_stub_short_branch_v4t_thumb_arm:
4071 case arm_stub_long_branch_any_arm_pic:
4072 case arm_stub_long_branch_any_thumb_pic:
4073 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4074 case arm_stub_long_branch_v4t_arm_thumb_pic:
4075 case arm_stub_long_branch_v4t_thumb_arm_pic:
4076 case arm_stub_long_branch_thumb_only_pic:
4077 case arm_stub_long_branch_any_tls_pic:
4078 case arm_stub_long_branch_v4t_thumb_tls_pic:
4079 case arm_stub_a8_veneer_blx:
4080 return 4;
4081
4082 default:
4083 abort (); /* Should be unreachable. */
4084 }
4085 }
4086
4087 static bfd_boolean
4088 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4089 void * in_arg)
4090 {
4091 #define MAXRELOCS 2
4092 struct elf32_arm_stub_hash_entry *stub_entry;
4093 struct elf32_arm_link_hash_table *globals;
4094 struct bfd_link_info *info;
4095 asection *stub_sec;
4096 bfd *stub_bfd;
4097 bfd_byte *loc;
4098 bfd_vma sym_value;
4099 int template_size;
4100 int size;
4101 const insn_sequence *template_sequence;
4102 int i;
4103 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4104 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4105 int nrelocs = 0;
4106
4107 /* Massage our args to the form they really have. */
4108 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4109 info = (struct bfd_link_info *) in_arg;
4110
4111 globals = elf32_arm_hash_table (info);
4112 if (globals == NULL)
4113 return FALSE;
4114
4115 stub_sec = stub_entry->stub_sec;
4116
4117 if ((globals->fix_cortex_a8 < 0)
4118 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4119 /* We have to do less-strictly-aligned fixes last. */
4120 return TRUE;
4121
4122 /* Make a note of the offset within the stubs for this entry. */
4123 stub_entry->stub_offset = stub_sec->size;
4124 loc = stub_sec->contents + stub_entry->stub_offset;
4125
4126 stub_bfd = stub_sec->owner;
4127
4128 /* This is the address of the stub destination. */
4129 sym_value = (stub_entry->target_value
4130 + stub_entry->target_section->output_offset
4131 + stub_entry->target_section->output_section->vma);
4132
4133 template_sequence = stub_entry->stub_template;
4134 template_size = stub_entry->stub_template_size;
4135
4136 size = 0;
4137 for (i = 0; i < template_size; i++)
4138 {
4139 switch (template_sequence[i].type)
4140 {
4141 case THUMB16_TYPE:
4142 {
4143 bfd_vma data = (bfd_vma) template_sequence[i].data;
4144 if (template_sequence[i].reloc_addend != 0)
4145 {
4146 /* We've borrowed the reloc_addend field to mean we should
4147 insert a condition code into this (Thumb-1 branch)
4148 instruction. See THUMB16_BCOND_INSN. */
4149 BFD_ASSERT ((data & 0xff00) == 0xd000);
4150 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4151 }
4152 bfd_put_16 (stub_bfd, data, loc + size);
4153 size += 2;
4154 }
4155 break;
4156
4157 case THUMB32_TYPE:
4158 bfd_put_16 (stub_bfd,
4159 (template_sequence[i].data >> 16) & 0xffff,
4160 loc + size);
4161 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4162 loc + size + 2);
4163 if (template_sequence[i].r_type != R_ARM_NONE)
4164 {
4165 stub_reloc_idx[nrelocs] = i;
4166 stub_reloc_offset[nrelocs++] = size;
4167 }
4168 size += 4;
4169 break;
4170
4171 case ARM_TYPE:
4172 bfd_put_32 (stub_bfd, template_sequence[i].data,
4173 loc + size);
4174 /* Handle cases where the target is encoded within the
4175 instruction. */
4176 if (template_sequence[i].r_type == R_ARM_JUMP24)
4177 {
4178 stub_reloc_idx[nrelocs] = i;
4179 stub_reloc_offset[nrelocs++] = size;
4180 }
4181 size += 4;
4182 break;
4183
4184 case DATA_TYPE:
4185 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4186 stub_reloc_idx[nrelocs] = i;
4187 stub_reloc_offset[nrelocs++] = size;
4188 size += 4;
4189 break;
4190
4191 default:
4192 BFD_FAIL ();
4193 return FALSE;
4194 }
4195 }
4196
4197 stub_sec->size += size;
4198
4199 /* Stub size has already been computed in arm_size_one_stub. Check
4200 consistency. */
4201 BFD_ASSERT (size == stub_entry->stub_size);
4202
4203 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4204 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4205 sym_value |= 1;
4206
4207 /* Assume each stub has at least one and at most MAXRELOCS entries to
4208 relocate. */
4209 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4210
4211 for (i = 0; i < nrelocs; i++)
4212 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4213 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4214 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4215 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4216 {
4217 Elf_Internal_Rela rel;
4218 bfd_boolean unresolved_reloc;
4219 char *error_message;
4220 enum arm_st_branch_type branch_type
4221 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4222 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4223 bfd_vma points_to = sym_value + stub_entry->target_addend;
4224
4225 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4226 rel.r_info = ELF32_R_INFO (0,
4227 template_sequence[stub_reloc_idx[i]].r_type);
4228 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4229
4230 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4231 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4232 template should refer back to the instruction after the original
4233 branch. */
4234 points_to = sym_value;
4235
4236 /* There may be unintended consequences if this is not true. */
4237 BFD_ASSERT (stub_entry->h == NULL);
4238
4239 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4240 properly. We should probably use this function unconditionally,
4241 rather than only for certain relocations listed in the enclosing
4242 conditional, for the sake of consistency. */
4243 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4244 (template_sequence[stub_reloc_idx[i]].r_type),
4245 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4246 points_to, info, stub_entry->target_section, "", STT_FUNC,
4247 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4248 &unresolved_reloc, &error_message);
4249 }
4250 else
4251 {
4252 Elf_Internal_Rela rel;
4253 bfd_boolean unresolved_reloc;
4254 char *error_message;
4255 bfd_vma points_to = sym_value + stub_entry->target_addend
4256 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4257
4258 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4259 rel.r_info = ELF32_R_INFO (0,
4260 template_sequence[stub_reloc_idx[i]].r_type);
4261 rel.r_addend = 0;
4262
4263 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4264 (template_sequence[stub_reloc_idx[i]].r_type),
4265 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4266 points_to, info, stub_entry->target_section, "", STT_FUNC,
4267 stub_entry->branch_type,
4268 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4269 &error_message);
4270 }
4271
4272 return TRUE;
4273 #undef MAXRELOCS
4274 }
4275
4276 /* Calculate the template, template size and instruction size for a stub.
4277 Return value is the instruction size. */
4278
4279 static unsigned int
4280 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4281 const insn_sequence **stub_template,
4282 int *stub_template_size)
4283 {
4284 const insn_sequence *template_sequence = NULL;
4285 int template_size = 0, i;
4286 unsigned int size;
4287
4288 template_sequence = stub_definitions[stub_type].template_sequence;
4289 if (stub_template)
4290 *stub_template = template_sequence;
4291
4292 template_size = stub_definitions[stub_type].template_size;
4293 if (stub_template_size)
4294 *stub_template_size = template_size;
4295
4296 size = 0;
4297 for (i = 0; i < template_size; i++)
4298 {
4299 switch (template_sequence[i].type)
4300 {
4301 case THUMB16_TYPE:
4302 size += 2;
4303 break;
4304
4305 case ARM_TYPE:
4306 case THUMB32_TYPE:
4307 case DATA_TYPE:
4308 size += 4;
4309 break;
4310
4311 default:
4312 BFD_FAIL ();
4313 return 0;
4314 }
4315 }
4316
4317 return size;
4318 }
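/* As an illustration, a hypothetical template consisting of two THUMB16
   entries and one DATA entry would give 2 + 2 + 4 = 8 bytes of stub. */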
4319
4320 /* As above, but don't actually build the stub. Just bump offset so
4321 we know stub section sizes. */
4322
4323 static bfd_boolean
4324 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4325 void *in_arg ATTRIBUTE_UNUSED)
4326 {
4327 struct elf32_arm_stub_hash_entry *stub_entry;
4328 const insn_sequence *template_sequence;
4329 int template_size, size;
4330
4331 /* Massage our args to the form they really have. */
4332 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4333
4334 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
4335 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
4336
4337 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4338 &template_size);
4339
4340 stub_entry->stub_size = size;
4341 stub_entry->stub_template = template_sequence;
4342 stub_entry->stub_template_size = template_size;
4343
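/* Reserve space rounded up to an 8-byte boundary, e.g. a 10-byte stub
   reserves 16 bytes in the stub section. */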
4344 size = (size + 7) & ~7;
4345 stub_entry->stub_sec->size += size;
4346
4347 return TRUE;
4348 }
4349
4350 /* External entry points for sizing and building linker stubs. */
4351
4352 /* Set up various things so that we can make a list of input sections
4353 for each output section included in the link. Returns -1 on error,
4354 0 when no stubs will be needed, and 1 on success. */
4355
4356 int
4357 elf32_arm_setup_section_lists (bfd *output_bfd,
4358 struct bfd_link_info *info)
4359 {
4360 bfd *input_bfd;
4361 unsigned int bfd_count;
4362 int top_id, top_index;
4363 asection *section;
4364 asection **input_list, **list;
4365 bfd_size_type amt;
4366 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4367
4368 if (htab == NULL)
4369 return 0;
4370 if (! is_elf_hash_table (htab))
4371 return 0;
4372
4373 /* Count the number of input BFDs and find the top input section id. */
4374 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4375 input_bfd != NULL;
4376 input_bfd = input_bfd->link_next)
4377 {
4378 bfd_count += 1;
4379 for (section = input_bfd->sections;
4380 section != NULL;
4381 section = section->next)
4382 {
4383 if (top_id < section->id)
4384 top_id = section->id;
4385 }
4386 }
4387 htab->bfd_count = bfd_count;
4388
4389 amt = sizeof (struct map_stub) * (top_id + 1);
4390 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4391 if (htab->stub_group == NULL)
4392 return -1;
4393 htab->top_id = top_id;
4394
4395 /* We can't use output_bfd->section_count here to find the top output
4396 section index as some sections may have been removed, and
4397 _bfd_strip_section_from_output doesn't renumber the indices. */
4398 for (section = output_bfd->sections, top_index = 0;
4399 section != NULL;
4400 section = section->next)
4401 {
4402 if (top_index < section->index)
4403 top_index = section->index;
4404 }
4405
4406 htab->top_index = top_index;
4407 amt = sizeof (asection *) * (top_index + 1);
4408 input_list = (asection **) bfd_malloc (amt);
4409 htab->input_list = input_list;
4410 if (input_list == NULL)
4411 return -1;
4412
4413 /* For sections we aren't interested in, mark their entries with a
4414 value we can check later. */
4415 list = input_list + top_index;
4416 do
4417 *list = bfd_abs_section_ptr;
4418 while (list-- != input_list);
4419
4420 for (section = output_bfd->sections;
4421 section != NULL;
4422 section = section->next)
4423 {
4424 if ((section->flags & SEC_CODE) != 0)
4425 input_list[section->index] = NULL;
4426 }
4427
4428 return 1;
4429 }
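/* On return, input_list entries for non-code output sections hold the
   bfd_abs_section_ptr marker while entries for code sections hold NULL;
   elf32_arm_next_input_section relies on that marker to skip output
   sections we are not interested in. */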
4430
4431 /* The linker repeatedly calls this function for each input section,
4432 in the order that input sections are linked into output sections.
4433 Build lists of input sections to determine groupings between which
4434 we may insert linker stubs. */
4435
4436 void
4437 elf32_arm_next_input_section (struct bfd_link_info *info,
4438 asection *isec)
4439 {
4440 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4441
4442 if (htab == NULL)
4443 return;
4444
4445 if (isec->output_section->index <= htab->top_index)
4446 {
4447 asection **list = htab->input_list + isec->output_section->index;
4448
4449 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4450 {
4451 /* Steal the link_sec pointer for our list. */
4452 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4453 /* This happens to make the list in reverse order,
4454 which we reverse later. */
4455 PREV_SEC (isec) = *list;
4456 *list = isec;
4457 }
4458 }
4459 }
4460
4461 /* See whether we can group stub sections together. Grouping stub
4462 sections may result in fewer stubs. More importantly, we need to
4463 put all .init* and .fini* stubs at the end of the .init or
4464 .fini output sections respectively, because glibc splits the
4465 _init and _fini functions into multiple parts. Putting a stub in
4466 the middle of a function is not a good idea. */
4467
4468 static void
4469 group_sections (struct elf32_arm_link_hash_table *htab,
4470 bfd_size_type stub_group_size,
4471 bfd_boolean stubs_always_after_branch)
4472 {
4473 asection **list = htab->input_list;
4474
4475 do
4476 {
4477 asection *tail = *list;
4478 asection *head;
4479
4480 if (tail == bfd_abs_section_ptr)
4481 continue;
4482
4483 /* Reverse the list: we must avoid placing stubs at the
4484 beginning of the section because the beginning of the text
4485 section may be required for an interrupt vector in bare metal
4486 code. */
4487 #define NEXT_SEC PREV_SEC
4488 head = NULL;
4489 while (tail != NULL)
4490 {
4491 /* Pop from tail. */
4492 asection *item = tail;
4493 tail = PREV_SEC (item);
4494
4495 /* Push on head. */
4496 NEXT_SEC (item) = head;
4497 head = item;
4498 }
4499
4500 while (head != NULL)
4501 {
4502 asection *curr;
4503 asection *next;
4504 bfd_vma stub_group_start = head->output_offset;
4505 bfd_vma end_of_next;
4506
4507 curr = head;
4508 while (NEXT_SEC (curr) != NULL)
4509 {
4510 next = NEXT_SEC (curr);
4511 end_of_next = next->output_offset + next->size;
4512 if (end_of_next - stub_group_start >= stub_group_size)
4513 /* End of NEXT is too far from start, so stop. */
4514 break;
4515 /* Add NEXT to the group. */
4516 curr = next;
4517 }
4518
4519 /* OK, the size from the start to the start of CURR is less
4520 than stub_group_size and thus can be handled by one stub
4521 section. (Or the head section is itself larger than
4522 stub_group_size, in which case we may be toast.)
4523 We should really be keeping track of the total size of
4524 stubs added here, as stubs contribute to the final output
4525 section size. */
4526 do
4527 {
4528 next = NEXT_SEC (head);
4529 /* Set up this stub group. */
4530 htab->stub_group[head->id].link_sec = curr;
4531 }
4532 while (head != curr && (head = next) != NULL);
4533
4534 /* But wait, there's more! Input sections up to stub_group_size
4535 bytes after the stub section can be handled by it too. */
4536 if (!stubs_always_after_branch)
4537 {
4538 stub_group_start = curr->output_offset + curr->size;
4539
4540 while (next != NULL)
4541 {
4542 end_of_next = next->output_offset + next->size;
4543 if (end_of_next - stub_group_start >= stub_group_size)
4544 /* End of NEXT is too far from stubs, so stop. */
4545 break;
4546 /* Add NEXT to the stub group. */
4547 head = next;
4548 next = NEXT_SEC (head);
4549 htab->stub_group[head->id].link_sec = curr;
4550 }
4551 }
4552 head = next;
4553 }
4554 }
4555 while (list++ != htab->input_list + htab->top_index);
4556
4557 free (htab->input_list);
4558 #undef PREV_SEC
4559 #undef NEXT_SEC
4560 }
4561
4562 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4563 erratum fix. */
4564
4565 static int
4566 a8_reloc_compare (const void *a, const void *b)
4567 {
4568 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4569 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4570
4571 if (ra->from < rb->from)
4572 return -1;
4573 else if (ra->from > rb->from)
4574 return 1;
4575 else
4576 return 0;
4577 }
4578
4579 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4580 const char *, char **);
4581
4582 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4583 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4584 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4585 otherwise. */
4586
4587 static bfd_boolean
4588 cortex_a8_erratum_scan (bfd *input_bfd,
4589 struct bfd_link_info *info,
4590 struct a8_erratum_fix **a8_fixes_p,
4591 unsigned int *num_a8_fixes_p,
4592 unsigned int *a8_fix_table_size_p,
4593 struct a8_erratum_reloc *a8_relocs,
4594 unsigned int num_a8_relocs,
4595 unsigned prev_num_a8_fixes,
4596 bfd_boolean *stub_changed_p)
4597 {
4598 asection *section;
4599 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4600 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4601 unsigned int num_a8_fixes = *num_a8_fixes_p;
4602 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4603
4604 if (htab == NULL)
4605 return FALSE;
4606
4607 for (section = input_bfd->sections;
4608 section != NULL;
4609 section = section->next)
4610 {
4611 bfd_byte *contents = NULL;
4612 struct _arm_elf_section_data *sec_data;
4613 unsigned int span;
4614 bfd_vma base_vma;
4615
4616 if (elf_section_type (section) != SHT_PROGBITS
4617 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4618 || (section->flags & SEC_EXCLUDE) != 0
4619 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4620 || (section->output_section == bfd_abs_section_ptr))
4621 continue;
4622
4623 base_vma = section->output_section->vma + section->output_offset;
4624
4625 if (elf_section_data (section)->this_hdr.contents != NULL)
4626 contents = elf_section_data (section)->this_hdr.contents;
4627 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4628 return TRUE;
4629
4630 sec_data = elf32_arm_section_data (section);
4631
4632 for (span = 0; span < sec_data->mapcount; span++)
4633 {
4634 unsigned int span_start = sec_data->map[span].vma;
4635 unsigned int span_end = (span == sec_data->mapcount - 1)
4636 ? section->size : sec_data->map[span + 1].vma;
4637 unsigned int i;
4638 char span_type = sec_data->map[span].type;
4639 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4640
4641 if (span_type != 't')
4642 continue;
4643
4644 /* Span is entirely within a single 4KB region: skip scanning. */
4645 if (((base_vma + span_start) & ~0xfff)
4646 == ((base_vma + span_end) & ~0xfff))
4647 continue;
4648
4649 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4650
4651 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4652 * The branch target is in the same 4KB region as the
4653 first half of the branch.
4654 * The instruction before the branch is a 32-bit
4655 length non-branch instruction. */
4656 for (i = span_start; i < span_end;)
4657 {
4658 unsigned int insn = bfd_getl16 (&contents[i]);
4659 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4660 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4661
4662 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4663 insn_32bit = TRUE;
4664
4665 if (insn_32bit)
4666 {
4667 /* Load the rest of the insn (in manual-friendly order). */
4668 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4669
4670 /* Encoding T4: B<c>.W. */
4671 is_b = (insn & 0xf800d000) == 0xf0009000;
4672 /* Encoding T1: BL<c>.W. */
4673 is_bl = (insn & 0xf800d000) == 0xf000d000;
4674 /* Encoding T2: BLX<c>.W. */
4675 is_blx = (insn & 0xf800d000) == 0xf000c000;
4676 /* Encoding T3: B<c>.W (not permitted in IT block). */
4677 is_bcc = (insn & 0xf800d000) == 0xf0008000
4678 && (insn & 0x07f00000) != 0x03800000;
4679 }
4680
4681 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4682
4683 if (((base_vma + i) & 0xfff) == 0xffe
4684 && insn_32bit
4685 && is_32bit_branch
4686 && last_was_32bit
4687 && ! last_was_branch)
4688 {
4689 bfd_signed_vma offset = 0;
4690 bfd_boolean force_target_arm = FALSE;
4691 bfd_boolean force_target_thumb = FALSE;
4692 bfd_vma target;
4693 enum elf32_arm_stub_type stub_type = arm_stub_none;
4694 struct a8_erratum_reloc key, *found;
4695 bfd_boolean use_plt = FALSE;
4696
4697 key.from = base_vma + i;
4698 found = (struct a8_erratum_reloc *)
4699 bsearch (&key, a8_relocs, num_a8_relocs,
4700 sizeof (struct a8_erratum_reloc),
4701 &a8_reloc_compare);
4702
4703 if (found)
4704 {
4705 char *error_message = NULL;
4706 struct elf_link_hash_entry *entry;
4707
4708 /* We don't care about the error returned from this
4709 function, only whether there is glue or not. */
4710 entry = find_thumb_glue (info, found->sym_name,
4711 &error_message);
4712
4713 if (entry)
4714 found->non_a8_stub = TRUE;
4715
4716 /* Keep a simpler condition, for the sake of clarity. */
4717 if (htab->root.splt != NULL && found->hash != NULL
4718 && found->hash->root.plt.offset != (bfd_vma) -1)
4719 use_plt = TRUE;
4720
4721 if (found->r_type == R_ARM_THM_CALL)
4722 {
4723 if (found->branch_type == ST_BRANCH_TO_ARM
4724 || use_plt)
4725 force_target_arm = TRUE;
4726 else
4727 force_target_thumb = TRUE;
4728 }
4729 }
4730
4731 /* Check if we have an offending branch instruction. */
4732
4733 if (found && found->non_a8_stub)
4734 /* We've already made a stub for this instruction, e.g.
4735 it's a long branch or a Thumb->ARM stub. Assume that
4736 stub will suffice to work around the A8 erratum (see
4737 setting of always_after_branch above). */
4738 ;
4739 else if (is_bcc)
4740 {
4741 offset = (insn & 0x7ff) << 1;
4742 offset |= (insn & 0x3f0000) >> 4;
4743 offset |= (insn & 0x2000) ? 0x40000 : 0;
4744 offset |= (insn & 0x800) ? 0x80000 : 0;
4745 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4746 if (offset & 0x100000)
4747 offset |= ~ ((bfd_signed_vma) 0xfffff);
4748 stub_type = arm_stub_a8_veneer_b_cond;
4749 }
4750 else if (is_b || is_bl || is_blx)
4751 {
4752 int s = (insn & 0x4000000) != 0;
4753 int j1 = (insn & 0x2000) != 0;
4754 int j2 = (insn & 0x800) != 0;
4755 int i1 = !(j1 ^ s);
4756 int i2 = !(j2 ^ s);
4757
4758 offset = (insn & 0x7ff) << 1;
4759 offset |= (insn & 0x3ff0000) >> 4;
4760 offset |= i2 << 22;
4761 offset |= i1 << 23;
4762 offset |= s << 24;
4763 if (offset & 0x1000000)
4764 offset |= ~ ((bfd_signed_vma) 0xffffff);
4765
4766 if (is_blx)
4767 offset &= ~ ((bfd_signed_vma) 3);
4768
4769 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4770 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4771 }
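/* The extraction above follows the Thumb-2 branch encodings: for
   B.W/BL/BLX (encodings T4/T1/T2) the offset is
   SignExtend(S:I1:I2:imm10:imm11:'0') with I1 = NOT(J1 XOR S) and
   I2 = NOT(J2 XOR S); for the conditional B<c>.W (encoding T3) it is
   SignExtend(S:J2:J1:imm6:imm11:'0'). For BLX the low bits are cleared
   because the target is a word-aligned ARM instruction. */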
4772
4773 if (stub_type != arm_stub_none)
4774 {
4775 bfd_vma pc_for_insn = base_vma + i + 4;
4776
4777 /* The original instruction is a BL, but the target is
4778 an ARM instruction. If we were not making a stub,
4779 the BL would have been converted to a BLX. Use the
4780 BLX stub instead in that case. */
4781 if (htab->use_blx && force_target_arm
4782 && stub_type == arm_stub_a8_veneer_bl)
4783 {
4784 stub_type = arm_stub_a8_veneer_blx;
4785 is_blx = TRUE;
4786 is_bl = FALSE;
4787 }
4788 /* Conversely, if the original instruction was
4789 BLX but the target is Thumb mode, use the BL
4790 stub. */
4791 else if (force_target_thumb
4792 && stub_type == arm_stub_a8_veneer_blx)
4793 {
4794 stub_type = arm_stub_a8_veneer_bl;
4795 is_blx = FALSE;
4796 is_bl = TRUE;
4797 }
4798
4799 if (is_blx)
4800 pc_for_insn &= ~ ((bfd_vma) 3);
4801
4802 /* If we found a relocation, use the proper destination,
4803 not the offset in the (unrelocated) instruction.
4804 Note this is always done if we switched the stub type
4805 above. */
4806 if (found)
4807 offset =
4808 (bfd_signed_vma) (found->destination - pc_for_insn);
4809
4810 /* If the stub will use a Thumb-mode branch to a
4811 PLT target, redirect it to the preceding Thumb
4812 entry point. */
4813 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4814 offset -= PLT_THUMB_STUB_SIZE;
4815
4816 target = pc_for_insn + offset;
4817
4818 /* The BLX stub is ARM-mode code. Adjust the offset to
4819 take the different PC value (+8 instead of +4) into
4820 account. */
4821 if (stub_type == arm_stub_a8_veneer_blx)
4822 offset += 4;
4823
4824 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4825 {
4826 char *stub_name = NULL;
4827
4828 if (num_a8_fixes == a8_fix_table_size)
4829 {
4830 a8_fix_table_size *= 2;
4831 a8_fixes = (struct a8_erratum_fix *)
4832 bfd_realloc (a8_fixes,
4833 sizeof (struct a8_erratum_fix)
4834 * a8_fix_table_size);
4835 }
4836
4837 if (num_a8_fixes < prev_num_a8_fixes)
4838 {
4839 /* If we're doing a subsequent scan,
4840 check if we've found the same fix as
4841 before, and try to reuse the stub
4842 name. */
4843 stub_name = a8_fixes[num_a8_fixes].stub_name;
4844 if ((a8_fixes[num_a8_fixes].section != section)
4845 || (a8_fixes[num_a8_fixes].offset != i))
4846 {
4847 free (stub_name);
4848 stub_name = NULL;
4849 *stub_changed_p = TRUE;
4850 }
4851 }
4852
4853 if (!stub_name)
4854 {
4855 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4856 if (stub_name != NULL)
4857 sprintf (stub_name, "%x:%x", section->id, i);
4858 }
4859
4860 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4861 a8_fixes[num_a8_fixes].section = section;
4862 a8_fixes[num_a8_fixes].offset = i;
4863 a8_fixes[num_a8_fixes].addend = offset;
4864 a8_fixes[num_a8_fixes].orig_insn = insn;
4865 a8_fixes[num_a8_fixes].stub_name = stub_name;
4866 a8_fixes[num_a8_fixes].stub_type = stub_type;
4867 a8_fixes[num_a8_fixes].branch_type =
4868 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4869
4870 num_a8_fixes++;
4871 }
4872 }
4873 }
4874
4875 i += insn_32bit ? 4 : 2;
4876 last_was_32bit = insn_32bit;
4877 last_was_branch = is_32bit_branch;
4878 }
4879 }
4880
4881 if (elf_section_data (section)->this_hdr.contents == NULL)
4882 free (contents);
4883 }
4884
4885 *a8_fixes_p = a8_fixes;
4886 *num_a8_fixes_p = num_a8_fixes;
4887 *a8_fix_table_size_p = a8_fix_table_size;
4888
4889 return FALSE;
4890 }
4891
4892 /* Determine and set the size of the stub section for a final link.
4893
4894 The basic idea here is to examine all the relocations looking for
4895 PC-relative calls to a target that is unreachable with a "bl"
4896 instruction. */
4897
4898 bfd_boolean
4899 elf32_arm_size_stubs (bfd *output_bfd,
4900 bfd *stub_bfd,
4901 struct bfd_link_info *info,
4902 bfd_signed_vma group_size,
4903 asection * (*add_stub_section) (const char *, asection *),
4904 void (*layout_sections_again) (void))
4905 {
4906 bfd_size_type stub_group_size;
4907 bfd_boolean stubs_always_after_branch;
4908 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4909 struct a8_erratum_fix *a8_fixes = NULL;
4910 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4911 struct a8_erratum_reloc *a8_relocs = NULL;
4912 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4913
4914 if (htab == NULL)
4915 return FALSE;
4916
4917 if (htab->fix_cortex_a8)
4918 {
4919 a8_fixes = (struct a8_erratum_fix *)
4920 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4921 a8_relocs = (struct a8_erratum_reloc *)
4922 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4923 }
4924
4925 /* Propagate mach to stub bfd, because it may not have been
4926 finalized when we created stub_bfd. */
4927 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4928 bfd_get_mach (output_bfd));
4929
4930 /* Stash our params away. */
4931 htab->stub_bfd = stub_bfd;
4932 htab->add_stub_section = add_stub_section;
4933 htab->layout_sections_again = layout_sections_again;
4934 stubs_always_after_branch = group_size < 0;
4935
4936 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4937 as the first half of a 32-bit branch straddling two 4K pages. This is a
4938 crude way of enforcing that. */
4939 if (htab->fix_cortex_a8)
4940 stubs_always_after_branch = 1;
4941
4942 if (group_size < 0)
4943 stub_group_size = -group_size;
4944 else
4945 stub_group_size = group_size;
4946
4947 if (stub_group_size == 1)
4948 {
4949 /* Default values. */
4950 /* The Thumb branch range of +-4MB has to be used as the default
4951 maximum size (a given section can contain both ARM and Thumb
4952 code, so the worst case has to be taken into account).
4953
4954 This value is 24K less than that, which allows for 2025
4955 12-byte stubs. If we exceed that, then we will fail to link.
4956 The user will have to relink with an explicit group size
4957 option. */
4958 stub_group_size = 4170000;
4959 }
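/* That is: 4MB is 4194304 bytes; 4194304 - 4170000 = 24304 bytes
   (roughly 24K), and 24304 / 12 = 2025 complete 12-byte stubs. */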
4960
4961 group_sections (htab, stub_group_size, stubs_always_after_branch);
4962
4963 /* If we're applying the Cortex-A8 fix, we need to determine the
4964 program header size now, because we cannot change it later --
4965 that could alter section placements. Notice the A8 erratum fix
4966 ends up requiring the section addresses to remain unchanged
4967 modulo the page size. That's something we cannot represent
4968 inside BFD, and we don't want to force the section alignment to
4969 be the page size. */
4970 if (htab->fix_cortex_a8)
4971 (*htab->layout_sections_again) ();
4972
4973 while (1)
4974 {
4975 bfd *input_bfd;
4976 unsigned int bfd_indx;
4977 asection *stub_sec;
4978 bfd_boolean stub_changed = FALSE;
4979 unsigned prev_num_a8_fixes = num_a8_fixes;
4980
4981 num_a8_fixes = 0;
4982 for (input_bfd = info->input_bfds, bfd_indx = 0;
4983 input_bfd != NULL;
4984 input_bfd = input_bfd->link_next, bfd_indx++)
4985 {
4986 Elf_Internal_Shdr *symtab_hdr;
4987 asection *section;
4988 Elf_Internal_Sym *local_syms = NULL;
4989
4990 if (!is_arm_elf (input_bfd))
4991 continue;
4992
4993 num_a8_relocs = 0;
4994
4995 /* We'll need the symbol table in a second. */
4996 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4997 if (symtab_hdr->sh_info == 0)
4998 continue;
4999
5000 /* Walk over each section attached to the input bfd. */
5001 for (section = input_bfd->sections;
5002 section != NULL;
5003 section = section->next)
5004 {
5005 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5006
5007 /* If there aren't any relocs, then there's nothing more
5008 to do. */
5009 if ((section->flags & SEC_RELOC) == 0
5010 || section->reloc_count == 0
5011 || (section->flags & SEC_CODE) == 0)
5012 continue;
5013
5014 /* If this section is a link-once section that will be
5015 discarded, then don't create any stubs. */
5016 if (section->output_section == NULL
5017 || section->output_section->owner != output_bfd)
5018 continue;
5019
5020 /* Get the relocs. */
5021 internal_relocs
5022 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5023 NULL, info->keep_memory);
5024 if (internal_relocs == NULL)
5025 goto error_ret_free_local;
5026
5027 /* Now examine each relocation. */
5028 irela = internal_relocs;
5029 irelaend = irela + section->reloc_count;
5030 for (; irela < irelaend; irela++)
5031 {
5032 unsigned int r_type, r_indx;
5033 enum elf32_arm_stub_type stub_type;
5034 struct elf32_arm_stub_hash_entry *stub_entry;
5035 asection *sym_sec;
5036 bfd_vma sym_value;
5037 bfd_vma destination;
5038 struct elf32_arm_link_hash_entry *hash;
5039 const char *sym_name;
5040 char *stub_name;
5041 const asection *id_sec;
5042 unsigned char st_type;
5043 enum arm_st_branch_type branch_type;
5044 bfd_boolean created_stub = FALSE;
5045
5046 r_type = ELF32_R_TYPE (irela->r_info);
5047 r_indx = ELF32_R_SYM (irela->r_info);
5048
5049 if (r_type >= (unsigned int) R_ARM_max)
5050 {
5051 bfd_set_error (bfd_error_bad_value);
5052 error_ret_free_internal:
5053 if (elf_section_data (section)->relocs == NULL)
5054 free (internal_relocs);
5055 goto error_ret_free_local;
5056 }
5057
5058 hash = NULL;
5059 if (r_indx >= symtab_hdr->sh_info)
5060 hash = elf32_arm_hash_entry
5061 (elf_sym_hashes (input_bfd)
5062 [r_indx - symtab_hdr->sh_info]);
5063
5064 /* Only look for stubs on branch instructions, or on
5065 non-relaxed TLSCALL relocations. */
5066 if ((r_type != (unsigned int) R_ARM_CALL)
5067 && (r_type != (unsigned int) R_ARM_THM_CALL)
5068 && (r_type != (unsigned int) R_ARM_JUMP24)
5069 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5070 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5071 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5072 && (r_type != (unsigned int) R_ARM_PLT32)
5073 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5074 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5075 && r_type == elf32_arm_tls_transition
5076 (info, r_type, &hash->root)
5077 && ((hash ? hash->tls_type
5078 : (elf32_arm_local_got_tls_type
5079 (input_bfd)[r_indx]))
5080 & GOT_TLS_GDESC) != 0))
5081 continue;
5082
5083 /* Now determine the call target, its name, value,
5084 section. */
5085 sym_sec = NULL;
5086 sym_value = 0;
5087 destination = 0;
5088 sym_name = NULL;
5089
5090 if (r_type == (unsigned int) R_ARM_TLS_CALL
5091 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5092 {
5093 /* A non-relaxed TLS call. The target is the
5094 PLT-resident trampoline and has nothing to do
5095 with the symbol. */
5096 BFD_ASSERT (htab->tls_trampoline > 0);
5097 sym_sec = htab->root.splt;
5098 sym_value = htab->tls_trampoline;
5099 hash = 0;
5100 st_type = STT_FUNC;
5101 branch_type = ST_BRANCH_TO_ARM;
5102 }
5103 else if (!hash)
5104 {
5105 /* It's a local symbol. */
5106 Elf_Internal_Sym *sym;
5107
5108 if (local_syms == NULL)
5109 {
5110 local_syms
5111 = (Elf_Internal_Sym *) symtab_hdr->contents;
5112 if (local_syms == NULL)
5113 local_syms
5114 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5115 symtab_hdr->sh_info, 0,
5116 NULL, NULL, NULL);
5117 if (local_syms == NULL)
5118 goto error_ret_free_internal;
5119 }
5120
5121 sym = local_syms + r_indx;
5122 if (sym->st_shndx == SHN_UNDEF)
5123 sym_sec = bfd_und_section_ptr;
5124 else if (sym->st_shndx == SHN_ABS)
5125 sym_sec = bfd_abs_section_ptr;
5126 else if (sym->st_shndx == SHN_COMMON)
5127 sym_sec = bfd_com_section_ptr;
5128 else
5129 sym_sec =
5130 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5131
5132 if (!sym_sec)
5133 /* This is an undefined symbol. It can never
5134 be resolved. */
5135 continue;
5136
5137 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5138 sym_value = sym->st_value;
5139 destination = (sym_value + irela->r_addend
5140 + sym_sec->output_offset
5141 + sym_sec->output_section->vma);
5142 st_type = ELF_ST_TYPE (sym->st_info);
5143 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5144 sym_name
5145 = bfd_elf_string_from_elf_section (input_bfd,
5146 symtab_hdr->sh_link,
5147 sym->st_name);
5148 }
5149 else
5150 {
5151 /* It's an external symbol. */
5152 while (hash->root.root.type == bfd_link_hash_indirect
5153 || hash->root.root.type == bfd_link_hash_warning)
5154 hash = ((struct elf32_arm_link_hash_entry *)
5155 hash->root.root.u.i.link);
5156
5157 if (hash->root.root.type == bfd_link_hash_defined
5158 || hash->root.root.type == bfd_link_hash_defweak)
5159 {
5160 sym_sec = hash->root.root.u.def.section;
5161 sym_value = hash->root.root.u.def.value;
5162
5163 struct elf32_arm_link_hash_table *globals =
5164 elf32_arm_hash_table (info);
5165
5166 /* For a destination in a shared library,
5167 use the PLT stub as target address to
5168 decide whether a branch stub is
5169 needed. */
5170 if (globals != NULL
5171 && globals->root.splt != NULL
5172 && hash != NULL
5173 && hash->root.plt.offset != (bfd_vma) -1)
5174 {
5175 sym_sec = globals->root.splt;
5176 sym_value = hash->root.plt.offset;
5177 if (sym_sec->output_section != NULL)
5178 destination = (sym_value
5179 + sym_sec->output_offset
5180 + sym_sec->output_section->vma);
5181 }
5182 else if (sym_sec->output_section != NULL)
5183 destination = (sym_value + irela->r_addend
5184 + sym_sec->output_offset
5185 + sym_sec->output_section->vma);
5186 }
5187 else if ((hash->root.root.type == bfd_link_hash_undefined)
5188 || (hash->root.root.type == bfd_link_hash_undefweak))
5189 {
5190 /* For a shared library, use the PLT stub as
5191 the target address to decide whether a long
5192 branch stub is needed. For absolute code,
5193 such undefined symbols cannot be handled. */
5194 struct elf32_arm_link_hash_table *globals =
5195 elf32_arm_hash_table (info);
5196
5197 if (globals != NULL
5198 && globals->root.splt != NULL
5199 && hash != NULL
5200 && hash->root.plt.offset != (bfd_vma) -1)
5201 {
5202 sym_sec = globals->root.splt;
5203 sym_value = hash->root.plt.offset;
5204 if (sym_sec->output_section != NULL)
5205 destination = (sym_value
5206 + sym_sec->output_offset
5207 + sym_sec->output_section->vma);
5208 }
5209 else
5210 continue;
5211 }
5212 else
5213 {
5214 bfd_set_error (bfd_error_bad_value);
5215 goto error_ret_free_internal;
5216 }
5217 st_type = hash->root.type;
5218 branch_type = hash->root.target_internal;
5219 sym_name = hash->root.root.root.string;
5220 }
5221
5222 do
5223 {
5224 /* Determine what (if any) linker stub is needed. */
5225 stub_type = arm_type_of_stub (info, section, irela,
5226 st_type, &branch_type,
5227 hash, destination, sym_sec,
5228 input_bfd, sym_name);
5229 if (stub_type == arm_stub_none)
5230 break;
5231
5232 /* Support for grouping stub sections. */
5233 id_sec = htab->stub_group[section->id].link_sec;
5234
5235 /* Get the name of this stub. */
5236 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5237 irela, stub_type);
5238 if (!stub_name)
5239 goto error_ret_free_internal;
5240
5241 /* We've either created a stub for this reloc already,
5242 or we are about to. */
5243 created_stub = TRUE;
5244
5245 stub_entry = arm_stub_hash_lookup
5246 (&htab->stub_hash_table, stub_name,
5247 FALSE, FALSE);
5248 if (stub_entry != NULL)
5249 {
5250 /* The proper stub has already been created. */
5251 free (stub_name);
5252 stub_entry->target_value = sym_value;
5253 break;
5254 }
5255
5256 stub_entry = elf32_arm_add_stub (stub_name, section,
5257 htab);
5258 if (stub_entry == NULL)
5259 {
5260 free (stub_name);
5261 goto error_ret_free_internal;
5262 }
5263
5264 stub_entry->target_value = sym_value;
5265 stub_entry->target_section = sym_sec;
5266 stub_entry->stub_type = stub_type;
5267 stub_entry->h = hash;
5268 stub_entry->branch_type = branch_type;
5269
5270 if (sym_name == NULL)
5271 sym_name = "unnamed";
5272 stub_entry->output_name = (char *)
5273 bfd_alloc (htab->stub_bfd,
5274 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5275 + strlen (sym_name));
5276 if (stub_entry->output_name == NULL)
5277 {
5278 free (stub_name);
5279 goto error_ret_free_internal;
5280 }
5281
5282 /* For historical reasons, use the existing names for
5283 ARM-to-Thumb and Thumb-to-ARM stubs. */
5284 if ((r_type == (unsigned int) R_ARM_THM_CALL
5285 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5286 && branch_type == ST_BRANCH_TO_ARM)
5287 sprintf (stub_entry->output_name,
5288 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5289 else if ((r_type == (unsigned int) R_ARM_CALL
5290 || r_type == (unsigned int) R_ARM_JUMP24)
5291 && branch_type == ST_BRANCH_TO_THUMB)
5292 sprintf (stub_entry->output_name,
5293 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5294 else
5295 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5296 sym_name);
5297
5298 stub_changed = TRUE;
5299 }
5300 while (0);
5301
5302 /* Look for relocations which might trigger the Cortex-A8
5303 erratum. */
5304 if (htab->fix_cortex_a8
5305 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5306 || r_type == (unsigned int) R_ARM_THM_JUMP19
5307 || r_type == (unsigned int) R_ARM_THM_CALL
5308 || r_type == (unsigned int) R_ARM_THM_XPC22))
5309 {
5310 bfd_vma from = section->output_section->vma
5311 + section->output_offset
5312 + irela->r_offset;
5313
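/* A 32-bit Thumb branch whose address ends in 0xffe spans a 4K page
   boundary, which is the situation the Cortex-A8 erratum workaround
   has to examine.  */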
5314 if ((from & 0xfff) == 0xffe)
5315 {
5316 /* Found a candidate. Note we haven't checked the
5317 destination is within 4K here: if we do so (and
5318 don't create an entry in a8_relocs) we can't tell
5319 that a branch should have been relocated when
5320 scanning later. */
5321 if (num_a8_relocs == a8_reloc_table_size)
5322 {
5323 a8_reloc_table_size *= 2;
5324 a8_relocs = (struct a8_erratum_reloc *)
5325 bfd_realloc (a8_relocs,
5326 sizeof (struct a8_erratum_reloc)
5327 * a8_reloc_table_size);
5328 }
5329
5330 a8_relocs[num_a8_relocs].from = from;
5331 a8_relocs[num_a8_relocs].destination = destination;
5332 a8_relocs[num_a8_relocs].r_type = r_type;
5333 a8_relocs[num_a8_relocs].branch_type = branch_type;
5334 a8_relocs[num_a8_relocs].sym_name = sym_name;
5335 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5336 a8_relocs[num_a8_relocs].hash = hash;
5337
5338 num_a8_relocs++;
5339 }
5340 }
5341 }
5342
5343 /* We're done with the internal relocs, free them. */
5344 if (elf_section_data (section)->relocs == NULL)
5345 free (internal_relocs);
5346 }
5347
5348 if (htab->fix_cortex_a8)
5349 {
5350 /* Sort relocs which might apply to the Cortex-A8 erratum. */
5351 qsort (a8_relocs, num_a8_relocs,
5352 sizeof (struct a8_erratum_reloc),
5353 &a8_reloc_compare);
5354
5355 /* Scan for branches which might trigger the Cortex-A8 erratum. */
5356 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5357 &num_a8_fixes, &a8_fix_table_size,
5358 a8_relocs, num_a8_relocs,
5359 prev_num_a8_fixes, &stub_changed)
5360 != 0)
5361 goto error_ret_free_local;
5362 }
5363 }
5364
5365 if (prev_num_a8_fixes != num_a8_fixes)
5366 stub_changed = TRUE;
5367
5368 if (!stub_changed)
5369 break;
5370
5371 /* OK, we've added some stubs. Find out the new size of the
5372 stub sections. */
5373 for (stub_sec = htab->stub_bfd->sections;
5374 stub_sec != NULL;
5375 stub_sec = stub_sec->next)
5376 {
5377 /* Ignore non-stub sections. */
5378 if (!strstr (stub_sec->name, STUB_SUFFIX))
5379 continue;
5380
5381 stub_sec->size = 0;
5382 }
5383
5384 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5385
5386 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5387 if (htab->fix_cortex_a8)
5388 for (i = 0; i < num_a8_fixes; i++)
5389 {
5390 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5391 a8_fixes[i].section, htab);
5392
5393 if (stub_sec == NULL)
5394 goto error_ret_free_local;
5395
5396 stub_sec->size
5397 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5398 NULL);
5399 }
5400
5401
5402 /* Ask the linker to lay the sections out again now that the stub sections have been resized. */
5403 (*htab->layout_sections_again) ();
5404 }
5405
5406 /* Add stubs for Cortex-A8 erratum fixes now. */
5407 if (htab->fix_cortex_a8)
5408 {
5409 for (i = 0; i < num_a8_fixes; i++)
5410 {
5411 struct elf32_arm_stub_hash_entry *stub_entry;
5412 char *stub_name = a8_fixes[i].stub_name;
5413 asection *section = a8_fixes[i].section;
5414 unsigned int section_id = a8_fixes[i].section->id;
5415 asection *link_sec = htab->stub_group[section_id].link_sec;
5416 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5417 const insn_sequence *template_sequence;
5418 int template_size, size = 0;
5419
5420 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5421 TRUE, FALSE);
5422 if (stub_entry == NULL)
5423 {
5424 (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
5425 section->owner,
5426 stub_name);
5427 return FALSE;
5428 }
5429
5430 stub_entry->stub_sec = stub_sec;
5431 stub_entry->stub_offset = 0;
5432 stub_entry->id_sec = link_sec;
5433 stub_entry->stub_type = a8_fixes[i].stub_type;
5434 stub_entry->target_section = a8_fixes[i].section;
5435 stub_entry->target_value = a8_fixes[i].offset;
5436 stub_entry->target_addend = a8_fixes[i].addend;
5437 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5438 stub_entry->branch_type = a8_fixes[i].branch_type;
5439
5440 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5441 &template_sequence,
5442 &template_size);
5443
5444 stub_entry->stub_size = size;
5445 stub_entry->stub_template = template_sequence;
5446 stub_entry->stub_template_size = template_size;
5447 }
5448
5449 /* Stash the Cortex-A8 erratum fix array for use later in
5450 elf32_arm_write_section(). */
5451 htab->a8_erratum_fixes = a8_fixes;
5452 htab->num_a8_erratum_fixes = num_a8_fixes;
5453 }
5454 else
5455 {
5456 htab->a8_erratum_fixes = NULL;
5457 htab->num_a8_erratum_fixes = 0;
5458 }
5459 return TRUE;
5460
5461 error_ret_free_local:
5462 return FALSE;
5463 }
5464
5465 /* Build all the stubs associated with the current output file. The
5466 stubs are kept in a hash table attached to the main linker hash
5467 table. We also set up the .plt entries for statically linked PIC
5468 functions here. This function is called via arm_elf_finish in the
5469 linker. */
5470
5471 bfd_boolean
5472 elf32_arm_build_stubs (struct bfd_link_info *info)
5473 {
5474 asection *stub_sec;
5475 struct bfd_hash_table *table;
5476 struct elf32_arm_link_hash_table *htab;
5477
5478 htab = elf32_arm_hash_table (info);
5479 if (htab == NULL)
5480 return FALSE;
5481
5482 for (stub_sec = htab->stub_bfd->sections;
5483 stub_sec != NULL;
5484 stub_sec = stub_sec->next)
5485 {
5486 bfd_size_type size;
5487
5488 /* Ignore non-stub sections. */
5489 if (!strstr (stub_sec->name, STUB_SUFFIX))
5490 continue;
5491
5492 /* Allocate memory to hold the linker stubs. */
5493 size = stub_sec->size;
5494 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5495 if (stub_sec->contents == NULL && size != 0)
5496 return FALSE;
5497 stub_sec->size = 0;
5498 }
5499
5500 /* Build the stubs as directed by the stub hash table. */
5501 table = &htab->stub_hash_table;
5502 bfd_hash_traverse (table, arm_build_one_stub, info);
5503 if (htab->fix_cortex_a8)
5504 {
5505 /* Place the Cortex-A8 stubs last. */
5506 htab->fix_cortex_a8 = -1;
5507 bfd_hash_traverse (table, arm_build_one_stub, info);
5508 }
5509
5510 return TRUE;
5511 }
5512
5513 /* Locate the Thumb encoded calling stub for NAME. */
5514
5515 static struct elf_link_hash_entry *
5516 find_thumb_glue (struct bfd_link_info *link_info,
5517 const char *name,
5518 char **error_message)
5519 {
5520 char *tmp_name;
5521 struct elf_link_hash_entry *hash;
5522 struct elf32_arm_link_hash_table *hash_table;
5523
5524 /* We need a pointer to the armelf specific hash table. */
5525 hash_table = elf32_arm_hash_table (link_info);
5526 if (hash_table == NULL)
5527 return NULL;
5528
5529 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5530 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5531
5532 BFD_ASSERT (tmp_name);
5533
5534 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5535
5536 hash = elf_link_hash_lookup
5537 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5538
5539 if (hash == NULL
5540 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5541 tmp_name, name) == -1)
5542 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5543
5544 free (tmp_name);
5545
5546 return hash;
5547 }
5548
5549 /* Locate the ARM encoded calling stub for NAME. */
5550
5551 static struct elf_link_hash_entry *
5552 find_arm_glue (struct bfd_link_info *link_info,
5553 const char *name,
5554 char **error_message)
5555 {
5556 char *tmp_name;
5557 struct elf_link_hash_entry *myh;
5558 struct elf32_arm_link_hash_table *hash_table;
5559
5560 /* We need a pointer to the elfarm specific hash table. */
5561 hash_table = elf32_arm_hash_table (link_info);
5562 if (hash_table == NULL)
5563 return NULL;
5564
5565 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5566 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5567
5568 BFD_ASSERT (tmp_name);
5569
5570 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5571
5572 myh = elf_link_hash_lookup
5573 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5574
5575 if (myh == NULL
5576 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5577 tmp_name, name) == -1)
5578 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5579
5580 free (tmp_name);
5581
5582 return myh;
5583 }
5584
5585 /* ARM->Thumb glue (static images):
5586
5587 .arm
5588 __func_from_arm:
5589 ldr r12, __func_addr
5590 bx r12
5591 __func_addr:
5592 .word func @ behave as if you saw an ARM_32 reloc.
5593
5594 (v5t static images)
5595 .arm
5596 __func_from_arm:
5597 ldr pc, __func_addr
5598 __func_addr:
5599 .word func @ behave as if you saw an ARM_32 reloc.
5600
5601 (relocatable images)
5602 .arm
5603 __func_from_arm:
5604 ldr r12, __func_offset
5605 add r12, r12, pc
5606 bx r12
5607 __func_offset:
5608 .word func - . */
5609
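/* The constants below encode the sequences above: for example,
   0xe59fc000 is "ldr r12, [pc, #0]", 0xe12fff1c is "bx r12" and
   0xe51ff004 is "ldr pc, [pc, #-4]".  The value 1 in the
   *_func_addr_insn constants is the low bit that is ORed into the
   address word to mark the target as Thumb.  */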
5610 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5611 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5612 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5613 static const insn32 a2t3_func_addr_insn = 0x00000001;
5614
5615 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5616 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5617 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5618
5619 #define ARM2THUMB_PIC_GLUE_SIZE 16
5620 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5621 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5622 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5623
5624 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5625
5626 .thumb .thumb
5627 .align 2 .align 2
5628 __func_from_thumb: __func_from_thumb:
5629 bx pc push {r6, lr}
5630 nop ldr r6, __func_addr
5631 .arm mov lr, pc
5632 b func bx r6
5633 .arm
5634 ;; back_to_thumb
5635 ldmia r13! {r6, lr}
5636 bx lr
5637 __func_addr:
5638 .word func */
5639
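/* The constants below encode the interworking-aware sequence on the
   left: 0x4778 is the Thumb "bx pc", 0x46c0 is the Thumb nop
   ("mov r8, r8") and 0xea000000 is an ARM "b" whose offset field is
   filled in when the glue is written out.  */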
5640 #define THUMB2ARM_GLUE_SIZE 8
5641 static const insn16 t2a1_bx_pc_insn = 0x4778;
5642 static const insn16 t2a2_noop_insn = 0x46c0;
5643 static const insn32 t2a3_b_insn = 0xea000000;
5644
5645 #define VFP11_ERRATUM_VENEER_SIZE 8
5646
5647 #define ARM_BX_VENEER_SIZE 12
5648 static const insn32 armbx1_tst_insn = 0xe3100001;
5649 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5650 static const insn32 armbx3_bx_insn = 0xe12fff10;
5651
5652 #ifndef ELFARM_NABI_C_INCLUDED
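/* Allocate the contents of the glue section NAME in ABFD, or mark the
   section for exclusion from the output if SIZE is zero.  */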
5653 static void
5654 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5655 {
5656 asection * s;
5657 bfd_byte * contents;
5658
5659 if (size == 0)
5660 {
5661 /* Do not include empty glue sections in the output. */
5662 if (abfd != NULL)
5663 {
5664 s = bfd_get_linker_section (abfd, name);
5665 if (s != NULL)
5666 s->flags |= SEC_EXCLUDE;
5667 }
5668 return;
5669 }
5670
5671 BFD_ASSERT (abfd != NULL);
5672
5673 s = bfd_get_linker_section (abfd, name);
5674 BFD_ASSERT (s != NULL);
5675
5676 contents = (bfd_byte *) bfd_alloc (abfd, size);
5677
5678 BFD_ASSERT (s->size == size);
5679 s->contents = contents;
5680 }
5681
5682 bfd_boolean
5683 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5684 {
5685 struct elf32_arm_link_hash_table * globals;
5686
5687 globals = elf32_arm_hash_table (info);
5688 BFD_ASSERT (globals != NULL);
5689
5690 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5691 globals->arm_glue_size,
5692 ARM2THUMB_GLUE_SECTION_NAME);
5693
5694 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5695 globals->thumb_glue_size,
5696 THUMB2ARM_GLUE_SECTION_NAME);
5697
5698 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5699 globals->vfp11_erratum_glue_size,
5700 VFP11_ERRATUM_VENEER_SECTION_NAME);
5701
5702 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5703 globals->bx_glue_size,
5704 ARM_BX_GLUE_SECTION_NAME);
5705
5706 return TRUE;
5707 }
5708
5709 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5710 Returns the symbol identifying the stub. */
5711
5712 static struct elf_link_hash_entry *
5713 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5714 struct elf_link_hash_entry * h)
5715 {
5716 const char * name = h->root.root.string;
5717 asection * s;
5718 char * tmp_name;
5719 struct elf_link_hash_entry * myh;
5720 struct bfd_link_hash_entry * bh;
5721 struct elf32_arm_link_hash_table * globals;
5722 bfd_vma val;
5723 bfd_size_type size;
5724
5725 globals = elf32_arm_hash_table (link_info);
5726 BFD_ASSERT (globals != NULL);
5727 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5728
5729 s = bfd_get_linker_section
5730 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5731
5732 BFD_ASSERT (s != NULL);
5733
5734 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5735 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5736
5737 BFD_ASSERT (tmp_name);
5738
5739 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5740
5741 myh = elf_link_hash_lookup
5742 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5743
5744 if (myh != NULL)
5745 {
5746 /* We've already seen this guy. */
5747 free (tmp_name);
5748 return myh;
5749 }
5750
5751 /* The only trick here is using hash_table->arm_glue_size as the value.
5752 Even though the section isn't allocated yet, this is where we will be
5753 putting it. The +1 on the value marks that the stub has not been
5754 output yet - not that it is a Thumb function. */
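/* The low bit is tested and cleared again in elf32_arm_create_thumb_stub
   when the stub contents are actually written.  */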
5755 bh = NULL;
5756 val = globals->arm_glue_size + 1;
5757 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5758 tmp_name, BSF_GLOBAL, s, val,
5759 NULL, TRUE, FALSE, &bh);
5760
5761 myh = (struct elf_link_hash_entry *) bh;
5762 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5763 myh->forced_local = 1;
5764
5765 free (tmp_name);
5766
5767 if (link_info->shared || globals->root.is_relocatable_executable
5768 || globals->pic_veneer)
5769 size = ARM2THUMB_PIC_GLUE_SIZE;
5770 else if (globals->use_blx)
5771 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5772 else
5773 size = ARM2THUMB_STATIC_GLUE_SIZE;
5774
5775 s->size += size;
5776 globals->arm_glue_size += size;
5777
5778 return myh;
5779 }
5780
5781 /* Allocate space for ARMv4 BX veneers. */
5782
5783 static void
5784 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5785 {
5786 asection * s;
5787 struct elf32_arm_link_hash_table *globals;
5788 char *tmp_name;
5789 struct elf_link_hash_entry *myh;
5790 struct bfd_link_hash_entry *bh;
5791 bfd_vma val;
5792
5793 /* BX PC does not need a veneer. */
5794 if (reg == 15)
5795 return;
5796
5797 globals = elf32_arm_hash_table (link_info);
5798 BFD_ASSERT (globals != NULL);
5799 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5800
5801 /* Check if this veneer has already been allocated. */
5802 if (globals->bx_glue_offset[reg])
5803 return;
5804
5805 s = bfd_get_linker_section
5806 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5807
5808 BFD_ASSERT (s != NULL);
5809
5810 /* Add symbol for veneer. */
5811 tmp_name = (char *)
5812 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5813
5814 BFD_ASSERT (tmp_name);
5815
5816 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5817
5818 myh = elf_link_hash_lookup
5819 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5820
5821 BFD_ASSERT (myh == NULL);
5822
5823 bh = NULL;
5824 val = globals->bx_glue_size;
5825 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5826 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5827 NULL, TRUE, FALSE, &bh);
5828
5829 myh = (struct elf_link_hash_entry *) bh;
5830 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5831 myh->forced_local = 1;
5832
5833 s->size += ARM_BX_VENEER_SIZE;
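/* Bit 1 marks the veneer as allocated; bit 0 is set later, in
   elf32_arm_bx_glue, once its contents have been written.  */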
5834 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5835 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5836 }
5837
5838
5839 /* Add an entry to the code/data map for section SEC. */
5840
5841 static void
5842 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5843 {
5844 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5845 unsigned int newidx;
5846
5847 if (sec_data->map == NULL)
5848 {
5849 sec_data->map = (elf32_arm_section_map *)
5850 bfd_malloc (sizeof (elf32_arm_section_map));
5851 sec_data->mapcount = 0;
5852 sec_data->mapsize = 1;
5853 }
5854
5855 newidx = sec_data->mapcount++;
5856
5857 if (sec_data->mapcount > sec_data->mapsize)
5858 {
5859 sec_data->mapsize *= 2;
5860 sec_data->map = (elf32_arm_section_map *)
5861 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5862 * sizeof (elf32_arm_section_map));
5863 }
5864
5865 if (sec_data->map)
5866 {
5867 sec_data->map[newidx].vma = vma;
5868 sec_data->map[newidx].type = type;
5869 }
5870 }
5871
5872
5873 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5874 veneers are handled for now. */
5875
5876 static bfd_vma
5877 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5878 elf32_vfp11_erratum_list *branch,
5879 bfd *branch_bfd,
5880 asection *branch_sec,
5881 unsigned int offset)
5882 {
5883 asection *s;
5884 struct elf32_arm_link_hash_table *hash_table;
5885 char *tmp_name;
5886 struct elf_link_hash_entry *myh;
5887 struct bfd_link_hash_entry *bh;
5888 bfd_vma val;
5889 struct _arm_elf_section_data *sec_data;
5890 elf32_vfp11_erratum_list *newerr;
5891
5892 hash_table = elf32_arm_hash_table (link_info);
5893 BFD_ASSERT (hash_table != NULL);
5894 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5895
5896 s = bfd_get_linker_section
5897 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5898
5899 BFD_ASSERT (s != NULL);
5900
5901 sec_data = elf32_arm_section_data (s);
5902
5903 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5904 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5905
5906 BFD_ASSERT (tmp_name);
5907
5908 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5909 hash_table->num_vfp11_fixes);
5910
5911 myh = elf_link_hash_lookup
5912 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5913
5914 BFD_ASSERT (myh == NULL);
5915
5916 bh = NULL;
5917 val = hash_table->vfp11_erratum_glue_size;
5918 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5919 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5920 NULL, TRUE, FALSE, &bh);
5921
5922 myh = (struct elf_link_hash_entry *) bh;
5923 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5924 myh->forced_local = 1;
5925
5926 /* Link veneer back to calling location. */
5927 sec_data->erratumcount += 1;
5928 newerr = (elf32_vfp11_erratum_list *)
5929 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5930
5931 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5932 newerr->vma = -1;
5933 newerr->u.v.branch = branch;
5934 newerr->u.v.id = hash_table->num_vfp11_fixes;
5935 branch->u.b.veneer = newerr;
5936
5937 newerr->next = sec_data->erratumlist;
5938 sec_data->erratumlist = newerr;
5939
5940 /* A symbol for the return from the veneer. */
5941 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5942 hash_table->num_vfp11_fixes);
5943
5944 myh = elf_link_hash_lookup
5945 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5946
5947 if (myh != NULL)
5948 abort ();
5949
5950 bh = NULL;
5951 val = offset + 4;
5952 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5953 branch_sec, val, NULL, TRUE, FALSE, &bh);
5954
5955 myh = (struct elf_link_hash_entry *) bh;
5956 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5957 myh->forced_local = 1;
5958
5959 free (tmp_name);
5960
5961 /* Generate a mapping symbol for the veneer section, and explicitly add an
5962 entry for that symbol to the code/data map for the section. */
5963 if (hash_table->vfp11_erratum_glue_size == 0)
5964 {
5965 bh = NULL;
5966 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5967 ever requires this erratum fix. */
5968 _bfd_generic_link_add_one_symbol (link_info,
5969 hash_table->bfd_of_glue_owner, "$a",
5970 BSF_LOCAL, s, 0, NULL,
5971 TRUE, FALSE, &bh);
5972
5973 myh = (struct elf_link_hash_entry *) bh;
5974 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5975 myh->forced_local = 1;
5976
5977 /* The elf32_arm_init_maps function only cares about symbols from input
5978 BFDs. We must make a note of this generated mapping symbol
5979 ourselves so that code byteswapping works properly in
5980 elf32_arm_write_section. */
5981 elf32_arm_section_map_add (s, 'a', 0);
5982 }
5983
5984 s->size += VFP11_ERRATUM_VENEER_SIZE;
5985 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5986 hash_table->num_vfp11_fixes++;
5987
5988 /* The offset of the veneer. */
5989 return val;
5990 }
5991
5992 #define ARM_GLUE_SECTION_FLAGS \
5993 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5994 | SEC_READONLY | SEC_LINKER_CREATED)
5995
5996 /* Create a fake section for use by the ARM backend of the linker. */
5997
5998 static bfd_boolean
5999 arm_make_glue_section (bfd * abfd, const char * name)
6000 {
6001 asection * sec;
6002
6003 sec = bfd_get_linker_section (abfd, name);
6004 if (sec != NULL)
6005 /* Already made. */
6006 return TRUE;
6007
6008 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6009
6010 if (sec == NULL
6011 || !bfd_set_section_alignment (abfd, sec, 2))
6012 return FALSE;
6013
6014 /* Set the gc mark to prevent the section from being removed by garbage
6015 collection, despite the fact that no relocs refer to this section. */
6016 sec->gc_mark = 1;
6017
6018 return TRUE;
6019 }
6020
6021 /* Add the glue sections to ABFD. This function is called from the
6022 linker scripts in ld/emultempl/{armelf}.em. */
6023
6024 bfd_boolean
6025 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6026 struct bfd_link_info *info)
6027 {
6028 /* If we are only performing a partial
6029 link do not bother adding the glue. */
6030 if (info->relocatable)
6031 return TRUE;
6032
6033 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6034 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6035 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6036 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6037 }
6038
6039 /* Select a BFD to be used to hold the sections used by the glue code.
6040 This function is called from the linker scripts in ld/emultempl/
6041 {armelf/pe}.em. */
6042
6043 bfd_boolean
6044 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6045 {
6046 struct elf32_arm_link_hash_table *globals;
6047
6048 /* If we are only performing a partial link
6049 do not bother getting a bfd to hold the glue. */
6050 if (info->relocatable)
6051 return TRUE;
6052
6053 /* Make sure we don't attach the glue sections to a dynamic object. */
6054 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6055
6056 globals = elf32_arm_hash_table (info);
6057 BFD_ASSERT (globals != NULL);
6058
6059 if (globals->bfd_of_glue_owner != NULL)
6060 return TRUE;
6061
6062 /* Save the bfd for later use. */
6063 globals->bfd_of_glue_owner = abfd;
6064
6065 return TRUE;
6066 }
6067
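/* Decide whether the linker may use BLX, based on the Tag_CPU_arch
   build attribute.  With the ARM1176 workaround enabled, BLX is only
   used on architectures that are not affected (v6T2, or anything newer
   than v6K); otherwise it is used for anything newer than ARMv4T.  */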
6068 static void
6069 check_use_blx (struct elf32_arm_link_hash_table *globals)
6070 {
6071 int cpu_arch;
6072
6073 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6074 Tag_CPU_arch);
6075
6076 if (globals->fix_arm1176)
6077 {
6078 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6079 globals->use_blx = 1;
6080 }
6081 else
6082 {
6083 if (cpu_arch > TAG_CPU_ARCH_V4T)
6084 globals->use_blx = 1;
6085 }
6086 }
6087
6088 bfd_boolean
6089 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6090 struct bfd_link_info *link_info)
6091 {
6092 Elf_Internal_Shdr *symtab_hdr;
6093 Elf_Internal_Rela *internal_relocs = NULL;
6094 Elf_Internal_Rela *irel, *irelend;
6095 bfd_byte *contents = NULL;
6096
6097 asection *sec;
6098 struct elf32_arm_link_hash_table *globals;
6099
6100 /* If we are only performing a partial link do not bother
6101 to construct any glue. */
6102 if (link_info->relocatable)
6103 return TRUE;
6104
6105 /* Here we have a bfd that is to be included in the link. We have a
6106 hook to do reloc rummaging, before section sizes are nailed down. */
6107 globals = elf32_arm_hash_table (link_info);
6108 BFD_ASSERT (globals != NULL);
6109
6110 check_use_blx (globals);
6111
6112 if (globals->byteswap_code && !bfd_big_endian (abfd))
6113 {
6114 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6115 abfd);
6116 return FALSE;
6117 }
6118
6119 /* PR 5398: If we have not decided to include any loadable sections in
6120 the output then we will not have a glue owner bfd. This is OK, it
6121 just means that there is nothing else for us to do here. */
6122 if (globals->bfd_of_glue_owner == NULL)
6123 return TRUE;
6124
6125 /* Rummage around all the relocs and map the glue vectors. */
6126 sec = abfd->sections;
6127
6128 if (sec == NULL)
6129 return TRUE;
6130
6131 for (; sec != NULL; sec = sec->next)
6132 {
6133 if (sec->reloc_count == 0)
6134 continue;
6135
6136 if ((sec->flags & SEC_EXCLUDE) != 0)
6137 continue;
6138
6139 symtab_hdr = & elf_symtab_hdr (abfd);
6140
6141 /* Load the relocs. */
6142 internal_relocs
6143 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6144
6145 if (internal_relocs == NULL)
6146 goto error_return;
6147
6148 irelend = internal_relocs + sec->reloc_count;
6149 for (irel = internal_relocs; irel < irelend; irel++)
6150 {
6151 long r_type;
6152 unsigned long r_index;
6153
6154 struct elf_link_hash_entry *h;
6155
6156 r_type = ELF32_R_TYPE (irel->r_info);
6157 r_index = ELF32_R_SYM (irel->r_info);
6158
6159 /* These are the only relocation types we care about. */
6160 if ( r_type != R_ARM_PC24
6161 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6162 continue;
6163
6164 /* Get the section contents if we haven't done so already. */
6165 if (contents == NULL)
6166 {
6167 /* Get cached copy if it exists. */
6168 if (elf_section_data (sec)->this_hdr.contents != NULL)
6169 contents = elf_section_data (sec)->this_hdr.contents;
6170 else
6171 {
6172 /* Go get them off disk. */
6173 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6174 goto error_return;
6175 }
6176 }
6177
6178 if (r_type == R_ARM_V4BX)
6179 {
6180 int reg;
6181
6182 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6183 record_arm_bx_glue (link_info, reg);
6184 continue;
6185 }
6186
6187 /* If the relocation is not against a symbol it cannot concern us. */
6188 h = NULL;
6189
6190 /* We don't care about local symbols. */
6191 if (r_index < symtab_hdr->sh_info)
6192 continue;
6193
6194 /* This is an external symbol. */
6195 r_index -= symtab_hdr->sh_info;
6196 h = (struct elf_link_hash_entry *)
6197 elf_sym_hashes (abfd)[r_index];
6198
6199 /* If the relocation is against a static symbol it must be within
6200 the current section and so cannot be a cross ARM/Thumb relocation. */
6201 if (h == NULL)
6202 continue;
6203
6204 /* If the call will go through a PLT entry then we do not need
6205 glue. */
6206 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6207 continue;
6208
6209 switch (r_type)
6210 {
6211 case R_ARM_PC24:
6212 /* This one is a call from arm code. We need to look up
6213 the target of the call. If it is a thumb target, we
6214 insert glue. */
6215 if (h->target_internal == ST_BRANCH_TO_THUMB)
6216 record_arm_to_thumb_glue (link_info, h);
6217 break;
6218
6219 default:
6220 abort ();
6221 }
6222 }
6223
6224 if (contents != NULL
6225 && elf_section_data (sec)->this_hdr.contents != contents)
6226 free (contents);
6227 contents = NULL;
6228
6229 if (internal_relocs != NULL
6230 && elf_section_data (sec)->relocs != internal_relocs)
6231 free (internal_relocs);
6232 internal_relocs = NULL;
6233 }
6234
6235 return TRUE;
6236
6237 error_return:
6238 if (contents != NULL
6239 && elf_section_data (sec)->this_hdr.contents != contents)
6240 free (contents);
6241 if (internal_relocs != NULL
6242 && elf_section_data (sec)->relocs != internal_relocs)
6243 free (internal_relocs);
6244
6245 return FALSE;
6246 }
6247 #endif
6248
6249
6250 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6251
6252 void
6253 bfd_elf32_arm_init_maps (bfd *abfd)
6254 {
6255 Elf_Internal_Sym *isymbuf;
6256 Elf_Internal_Shdr *hdr;
6257 unsigned int i, localsyms;
6258
6259 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6260 if (! is_arm_elf (abfd))
6261 return;
6262
6263 if ((abfd->flags & DYNAMIC) != 0)
6264 return;
6265
6266 hdr = & elf_symtab_hdr (abfd);
6267 localsyms = hdr->sh_info;
6268
6269 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6270 should contain the number of local symbols, which should come before any
6271 global symbols. Mapping symbols are always local. */
6272 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6273 NULL);
6274
6275 /* No internal symbols read? Skip this BFD. */
6276 if (isymbuf == NULL)
6277 return;
6278
6279 for (i = 0; i < localsyms; i++)
6280 {
6281 Elf_Internal_Sym *isym = &isymbuf[i];
6282 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6283 const char *name;
6284
6285 if (sec != NULL
6286 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6287 {
6288 name = bfd_elf_string_from_elf_section (abfd,
6289 hdr->sh_link, isym->st_name);
6290
6291 if (bfd_is_arm_special_symbol_name (name,
6292 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6293 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6294 }
6295 }
6296 }
6297
6298
6299 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6300 say what they wanted. */
6301
6302 void
6303 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6304 {
6305 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6306 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6307
6308 if (globals == NULL)
6309 return;
6310
6311 if (globals->fix_cortex_a8 == -1)
6312 {
6313 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6314 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6315 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6316 || out_attr[Tag_CPU_arch_profile].i == 0))
6317 globals->fix_cortex_a8 = 1;
6318 else
6319 globals->fix_cortex_a8 = 0;
6320 }
6321 }
6322
6323
6324 void
6325 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6326 {
6327 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6328 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6329
6330 if (globals == NULL)
6331 return;
6332 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6333 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6334 {
6335 switch (globals->vfp11_fix)
6336 {
6337 case BFD_ARM_VFP11_FIX_DEFAULT:
6338 case BFD_ARM_VFP11_FIX_NONE:
6339 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6340 break;
6341
6342 default:
6343 /* Give a warning, but do as the user requests anyway. */
6344 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6345 "workaround is not necessary for target architecture"), obfd);
6346 }
6347 }
6348 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6349 /* For earlier architectures, we might need the workaround, but do not
6350 enable it by default. If the user is running with broken hardware, they
6351 must enable the erratum fix explicitly. */
6352 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6353 }
6354
6355
6356 enum bfd_arm_vfp11_pipe
6357 {
6358 VFP11_FMAC,
6359 VFP11_LS,
6360 VFP11_DS,
6361 VFP11_BAD
6362 };
6363
6364 /* Return a VFP register number. This is encoded as RX:X for single-precision
6365 registers, or X:RX for double-precision registers, where RX is the group of
6366 four bits in the instruction encoding and X is the single extension bit.
6367 RX and X fields are specified using their lowest (starting) bit. The return
6368 value is:
6369
6370 0...31: single-precision registers s0...s31
6371 32...63: double-precision registers d0...d31.
6372
6373 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6374 encounter VFP3 instructions, so we allow the full range for DP registers. */
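/* For example (illustrative only): with is_double == FALSE, an RX field
   of 0b0011 and an X bit of 1 give (3 << 1) | 1 = 7, i.e. s7; the same
   fields with is_double == TRUE give (3 | (1 << 4)) + 32 = 51, i.e. d19.  */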
6375
6376 static unsigned int
6377 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6378 unsigned int x)
6379 {
6380 if (is_double)
6381 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6382 else
6383 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6384 }
6385
6386 /* Set bits in *WMASK according to a register number REG as encoded by
6387 bfd_arm_vfp11_regno(). Ignore d16-d31. */
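/* For example, s5 (reg 5) sets bit 5, while d3 (reg 35) sets bits 6 and 7,
   the two single-precision halves it overlaps.  */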
6388
6389 static void
6390 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6391 {
6392 if (reg < 32)
6393 *wmask |= 1 << reg;
6394 else if (reg < 48)
6395 *wmask |= 3 << ((reg - 32) * 2);
6396 }
6397
6398 /* Return TRUE if WMASK overwrites anything in REGS. */
6399
6400 static bfd_boolean
6401 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6402 {
6403 int i;
6404
6405 for (i = 0; i < numregs; i++)
6406 {
6407 unsigned int reg = regs[i];
6408
6409 if (reg < 32 && (wmask & (1 << reg)) != 0)
6410 return TRUE;
6411
6412 reg -= 32;
6413
6414 if (reg >= 16)
6415 continue;
6416
6417 if ((wmask & (3 << (reg * 2))) != 0)
6418 return TRUE;
6419 }
6420
6421 return FALSE;
6422 }
6423
6424 /* In this function, we're interested in two things: finding input registers
6425 for VFP data-processing instructions, and finding the set of registers which
6426 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6427 hold the written set, so FLDM etc. are easy to deal with (we're only
6428 interested in 32 SP registers or 16 DP registers, due to the VFP version
6429 implemented by the chip in question). DP registers are marked by setting
6430 both SP registers in the write mask. */
6431
6432 static enum bfd_arm_vfp11_pipe
6433 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6434 int *numregs)
6435 {
6436 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6437 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6438
6439 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6440 {
6441 unsigned int pqrs;
6442 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6443 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6444
6445 pqrs = ((insn & 0x00800000) >> 20)
6446 | ((insn & 0x00300000) >> 19)
6447 | ((insn & 0x00000040) >> 6);
6448
6449 switch (pqrs)
6450 {
6451 case 0: /* fmac[sd]. */
6452 case 1: /* fnmac[sd]. */
6453 case 2: /* fmsc[sd]. */
6454 case 3: /* fnmsc[sd]. */
6455 vpipe = VFP11_FMAC;
6456 bfd_arm_vfp11_write_mask (destmask, fd);
6457 regs[0] = fd;
6458 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6459 regs[2] = fm;
6460 *numregs = 3;
6461 break;
6462
6463 case 4: /* fmul[sd]. */
6464 case 5: /* fnmul[sd]. */
6465 case 6: /* fadd[sd]. */
6466 case 7: /* fsub[sd]. */
6467 vpipe = VFP11_FMAC;
6468 goto vfp_binop;
6469
6470 case 8: /* fdiv[sd]. */
6471 vpipe = VFP11_DS;
6472 vfp_binop:
6473 bfd_arm_vfp11_write_mask (destmask, fd);
6474 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6475 regs[1] = fm;
6476 *numregs = 2;
6477 break;
6478
6479 case 15: /* extended opcode. */
6480 {
6481 unsigned int extn = ((insn >> 15) & 0x1e)
6482 | ((insn >> 7) & 1);
6483
6484 switch (extn)
6485 {
6486 case 0: /* fcpy[sd]. */
6487 case 1: /* fabs[sd]. */
6488 case 2: /* fneg[sd]. */
6489 case 8: /* fcmp[sd]. */
6490 case 9: /* fcmpe[sd]. */
6491 case 10: /* fcmpz[sd]. */
6492 case 11: /* fcmpez[sd]. */
6493 case 16: /* fuito[sd]. */
6494 case 17: /* fsito[sd]. */
6495 case 24: /* ftoui[sd]. */
6496 case 25: /* ftouiz[sd]. */
6497 case 26: /* ftosi[sd]. */
6498 case 27: /* ftosiz[sd]. */
6499 /* These instructions will not bounce due to underflow. */
6500 *numregs = 0;
6501 vpipe = VFP11_FMAC;
6502 break;
6503
6504 case 3: /* fsqrt[sd]. */
6505 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6506 registers to cause the erratum in previous instructions. */
6507 bfd_arm_vfp11_write_mask (destmask, fd);
6508 vpipe = VFP11_DS;
6509 break;
6510
6511 case 15: /* fcvt{ds,sd}. */
6512 {
6513 int rnum = 0;
6514
6515 bfd_arm_vfp11_write_mask (destmask, fd);
6516
6517 /* Only FCVTSD can underflow. */
6518 if ((insn & 0x100) != 0)
6519 regs[rnum++] = fm;
6520
6521 *numregs = rnum;
6522
6523 vpipe = VFP11_FMAC;
6524 }
6525 break;
6526
6527 default:
6528 return VFP11_BAD;
6529 }
6530 }
6531 break;
6532
6533 default:
6534 return VFP11_BAD;
6535 }
6536 }
6537 /* Two-register transfer. */
6538 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6539 {
6540 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6541
6542 if ((insn & 0x100000) == 0)
6543 {
6544 if (is_double)
6545 bfd_arm_vfp11_write_mask (destmask, fm);
6546 else
6547 {
6548 bfd_arm_vfp11_write_mask (destmask, fm);
6549 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6550 }
6551 }
6552
6553 vpipe = VFP11_LS;
6554 }
6555 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6556 {
6557 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6558 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6559
6560 switch (puw)
6561 {
6562 case 0: /* Two-reg transfer. We should catch these above. */
6563 abort ();
6564
6565 case 2: /* fldm[sdx]. */
6566 case 3:
6567 case 5:
6568 {
6569 unsigned int i, offset = insn & 0xff;
6570
6571 if (is_double)
6572 offset >>= 1;
6573
6574 for (i = fd; i < fd + offset; i++)
6575 bfd_arm_vfp11_write_mask (destmask, i);
6576 }
6577 break;
6578
6579 case 4: /* fld[sd]. */
6580 case 6:
6581 bfd_arm_vfp11_write_mask (destmask, fd);
6582 break;
6583
6584 default:
6585 return VFP11_BAD;
6586 }
6587
6588 vpipe = VFP11_LS;
6589 }
6590 /* Single-register transfer. Note L==0. */
6591 else if ((insn & 0x0f100e10) == 0x0e000a10)
6592 {
6593 unsigned int opcode = (insn >> 21) & 7;
6594 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6595
6596 switch (opcode)
6597 {
6598 case 0: /* fmsr/fmdlr. */
6599 case 1: /* fmdhr. */
6600 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6601 destination register. I don't know if this is exactly right,
6602 but it is the conservative choice. */
6603 bfd_arm_vfp11_write_mask (destmask, fn);
6604 break;
6605
6606 case 7: /* fmxr. */
6607 break;
6608 }
6609
6610 vpipe = VFP11_LS;
6611 }
6612
6613 return vpipe;
6614 }
6615
6616
6617 static int elf32_arm_compare_mapping (const void * a, const void * b);
6618
6619
6620 /* Look for potentially-troublesome code sequences which might trigger the
6621 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6622 (available from ARM) for details of the erratum. A short version is
6623 described in ld.texinfo. */
6624
6625 bfd_boolean
6626 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6627 {
6628 asection *sec;
6629 bfd_byte *contents = NULL;
6630 int state = 0;
6631 int regs[3], numregs = 0;
6632 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6633 int use_vector;
6634
6635 if (globals == NULL)
6636 return FALSE;
6637 use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6637
6638 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6639 The states transition as follows:
6640
6641 0 -> 1 (vector) or 0 -> 2 (scalar)
6642 A VFP FMAC-pipeline instruction has been seen. Fill
6643 regs[0]..regs[numregs-1] with its input operands. Remember this
6644 instruction in 'first_fmac'.
6645
6646 1 -> 2
6647 Any instruction, except for a VFP instruction which overwrites
6648 regs[*].
6649
6650 1 -> 3 [ -> 0 ] or
6651 2 -> 3 [ -> 0 ]
6652 A VFP instruction has been seen which overwrites any of regs[*].
6653 We must make a veneer! Reset state to 0 before examining next
6654 instruction.
6655
6656 2 -> 0
6657 If we fail to match anything in state 2, reset to state 0 and reset
6658 the instruction pointer to the instruction after 'first_fmac'.
6659
6660 If the VFP11 vector mode is in use, there must be at least two unrelated
6661 instructions between anti-dependent VFP11 instructions to properly avoid
6662 triggering the erratum, hence the use of the extra state 1. */
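/* As an illustration (scalar mode, assumed sequence):

       fmacs s0, s1, s2    state 0 -> 2, regs[] = {s0, s1, s2}
       flds  s2, [r0]      writes s2, an input of the fmac -> state 3

   and a veneer is recorded for the fmacs instruction.  */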
6663
6664 /* If we are only performing a partial link do not bother
6665 to construct any glue. */
6666 if (link_info->relocatable)
6667 return TRUE;
6668
6669 /* Skip if this bfd does not correspond to an ELF image. */
6670 if (! is_arm_elf (abfd))
6671 return TRUE;
6672
6673 /* We should have chosen a fix type by the time we get here. */
6674 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6675
6676 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6677 return TRUE;
6678
6679 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6680 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6681 return TRUE;
6682
6683 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6684 {
6685 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6686 struct _arm_elf_section_data *sec_data;
6687
6688 /* If we don't have executable progbits, we're not interested in this
6689 section. Also skip if section is to be excluded. */
6690 if (elf_section_type (sec) != SHT_PROGBITS
6691 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6692 || (sec->flags & SEC_EXCLUDE) != 0
6693 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6694 || sec->output_section == bfd_abs_section_ptr
6695 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6696 continue;
6697
6698 sec_data = elf32_arm_section_data (sec);
6699
6700 if (sec_data->mapcount == 0)
6701 continue;
6702
6703 if (elf_section_data (sec)->this_hdr.contents != NULL)
6704 contents = elf_section_data (sec)->this_hdr.contents;
6705 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6706 goto error_return;
6707
6708 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6709 elf32_arm_compare_mapping);
6710
6711 for (span = 0; span < sec_data->mapcount; span++)
6712 {
6713 unsigned int span_start = sec_data->map[span].vma;
6714 unsigned int span_end = (span == sec_data->mapcount - 1)
6715 ? sec->size : sec_data->map[span + 1].vma;
6716 char span_type = sec_data->map[span].type;
6717
6718 /* FIXME: Only ARM mode is supported at present. We may need to
6719 support Thumb-2 mode also at some point. */
6720 if (span_type != 'a')
6721 continue;
6722
6723 for (i = span_start; i < span_end;)
6724 {
6725 unsigned int next_i = i + 4;
6726 unsigned int insn = bfd_big_endian (abfd)
6727 ? (contents[i] << 24)
6728 | (contents[i + 1] << 16)
6729 | (contents[i + 2] << 8)
6730 | contents[i + 3]
6731 : (contents[i + 3] << 24)
6732 | (contents[i + 2] << 16)
6733 | (contents[i + 1] << 8)
6734 | contents[i];
6735 unsigned int writemask = 0;
6736 enum bfd_arm_vfp11_pipe vpipe;
6737
6738 switch (state)
6739 {
6740 case 0:
6741 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6742 &numregs);
6743 /* I'm assuming the VFP11 erratum can trigger with denorm
6744 operands on either the FMAC or the DS pipeline. This might
6745 lead to slightly overenthusiastic veneer insertion. */
6746 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6747 {
6748 state = use_vector ? 1 : 2;
6749 first_fmac = i;
6750 veneer_of_insn = insn;
6751 }
6752 break;
6753
6754 case 1:
6755 {
6756 int other_regs[3], other_numregs;
6757 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6758 other_regs,
6759 &other_numregs);
6760 if (vpipe != VFP11_BAD
6761 && bfd_arm_vfp11_antidependency (writemask, regs,
6762 numregs))
6763 state = 3;
6764 else
6765 state = 2;
6766 }
6767 break;
6768
6769 case 2:
6770 {
6771 int other_regs[3], other_numregs;
6772 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6773 other_regs,
6774 &other_numregs);
6775 if (vpipe != VFP11_BAD
6776 && bfd_arm_vfp11_antidependency (writemask, regs,
6777 numregs))
6778 state = 3;
6779 else
6780 {
6781 state = 0;
6782 next_i = first_fmac + 4;
6783 }
6784 }
6785 break;
6786
6787 case 3:
6788 abort (); /* Should be unreachable. */
6789 }
6790
6791 if (state == 3)
6792 {
6793 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6794 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6795
6796 elf32_arm_section_data (sec)->erratumcount += 1;
6797
6798 newerr->u.b.vfp_insn = veneer_of_insn;
6799
6800 switch (span_type)
6801 {
6802 case 'a':
6803 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6804 break;
6805
6806 default:
6807 abort ();
6808 }
6809
6810 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6811 first_fmac);
6812
6813 newerr->vma = -1;
6814
6815 newerr->next = sec_data->erratumlist;
6816 sec_data->erratumlist = newerr;
6817
6818 state = 0;
6819 }
6820
6821 i = next_i;
6822 }
6823 }
6824
6825 if (contents != NULL
6826 && elf_section_data (sec)->this_hdr.contents != contents)
6827 free (contents);
6828 contents = NULL;
6829 }
6830
6831 return TRUE;
6832
6833 error_return:
6834 if (contents != NULL
6835 && elf_section_data (sec)->this_hdr.contents != contents)
6836 free (contents);
6837
6838 return FALSE;
6839 }
6840
6841 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6842 after sections have been laid out, using specially-named symbols. */
6843
6844 void
6845 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6846 struct bfd_link_info *link_info)
6847 {
6848 asection *sec;
6849 struct elf32_arm_link_hash_table *globals;
6850 char *tmp_name;
6851
6852 if (link_info->relocatable)
6853 return;
6854
6855 /* Skip if this bfd does not correspond to an ELF image. */
6856 if (! is_arm_elf (abfd))
6857 return;
6858
6859 globals = elf32_arm_hash_table (link_info);
6860 if (globals == NULL)
6861 return;
6862
6863 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6864 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6865
6866 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6867 {
6868 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6869 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6870
6871 for (; errnode != NULL; errnode = errnode->next)
6872 {
6873 struct elf_link_hash_entry *myh;
6874 bfd_vma vma;
6875
6876 switch (errnode->type)
6877 {
6878 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6879 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6880 /* Find veneer symbol. */
6881 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6882 errnode->u.b.veneer->u.v.id);
6883
6884 myh = elf_link_hash_lookup
6885 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6886
6887 if (myh == NULL)
6888 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6889 "`%s'"), abfd, tmp_name);
6890
6891 vma = myh->root.u.def.section->output_section->vma
6892 + myh->root.u.def.section->output_offset
6893 + myh->root.u.def.value;
6894
6895 errnode->u.b.veneer->vma = vma;
6896 break;
6897
6898 case VFP11_ERRATUM_ARM_VENEER:
6899 case VFP11_ERRATUM_THUMB_VENEER:
6900 /* Find return location. */
6901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6902 errnode->u.v.id);
6903
6904 myh = elf_link_hash_lookup
6905 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6906
6907 if (myh == NULL)
6908 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6909 "`%s'"), abfd, tmp_name);
6910
6911 vma = myh->root.u.def.section->output_section->vma
6912 + myh->root.u.def.section->output_offset
6913 + myh->root.u.def.value;
6914
6915 errnode->u.v.branch->vma = vma;
6916 break;
6917
6918 default:
6919 abort ();
6920 }
6921 }
6922 }
6923
6924 free (tmp_name);
6925 }
6926
6927
6928 /* Set target relocation values needed during linking. */
6929
6930 void
6931 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6932 struct bfd_link_info *link_info,
6933 int target1_is_rel,
6934 char * target2_type,
6935 int fix_v4bx,
6936 int use_blx,
6937 bfd_arm_vfp11_fix vfp11_fix,
6938 int no_enum_warn, int no_wchar_warn,
6939 int pic_veneer, int fix_cortex_a8,
6940 int fix_arm1176)
6941 {
6942 struct elf32_arm_link_hash_table *globals;
6943
6944 globals = elf32_arm_hash_table (link_info);
6945 if (globals == NULL)
6946 return;
6947
6948 globals->target1_is_rel = target1_is_rel;
6949 if (strcmp (target2_type, "rel") == 0)
6950 globals->target2_reloc = R_ARM_REL32;
6951 else if (strcmp (target2_type, "abs") == 0)
6952 globals->target2_reloc = R_ARM_ABS32;
6953 else if (strcmp (target2_type, "got-rel") == 0)
6954 globals->target2_reloc = R_ARM_GOT_PREL;
6955 else
6956 {
6957 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6958 target2_type);
6959 }
6960 globals->fix_v4bx = fix_v4bx;
6961 globals->use_blx |= use_blx;
6962 globals->vfp11_fix = vfp11_fix;
6963 globals->pic_veneer = pic_veneer;
6964 globals->fix_cortex_a8 = fix_cortex_a8;
6965 globals->fix_arm1176 = fix_arm1176;
6966
6967 BFD_ASSERT (is_arm_elf (output_bfd));
6968 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6969 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6970 }
6971
6972 /* Replace the target offset of a Thumb bl or b.w instruction. */
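/* The 25-bit, halfword-aligned offset is split across the two 16-bit
   halfwords as in the Thumb-2 BL/B.W encoding: the sign bit S and imm10
   (offset bits 12-21) go in the first halfword, and J1, J2 and imm11
   (offset bits 1-11) in the second, with J1 = (NOT I1) XOR S and
   J2 = (NOT I2) XOR S, where I1 and I2 are offset bits 23 and 22.  */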
6973
6974 static void
6975 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6976 {
6977 bfd_vma upper;
6978 bfd_vma lower;
6979 int reloc_sign;
6980
6981 BFD_ASSERT ((offset & 1) == 0);
6982
6983 upper = bfd_get_16 (abfd, insn);
6984 lower = bfd_get_16 (abfd, insn + 2);
6985 reloc_sign = (offset < 0) ? 1 : 0;
6986 upper = (upper & ~(bfd_vma) 0x7ff)
6987 | ((offset >> 12) & 0x3ff)
6988 | (reloc_sign << 10);
6989 lower = (lower & ~(bfd_vma) 0x2fff)
6990 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6991 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6992 | ((offset >> 1) & 0x7ff);
6993 bfd_put_16 (abfd, upper, insn);
6994 bfd_put_16 (abfd, lower, insn + 2);
6995 }
6996
6997 /* Thumb code calling an ARM function. */
6998
6999 static int
7000 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7001 const char * name,
7002 bfd * input_bfd,
7003 bfd * output_bfd,
7004 asection * input_section,
7005 bfd_byte * hit_data,
7006 asection * sym_sec,
7007 bfd_vma offset,
7008 bfd_signed_vma addend,
7009 bfd_vma val,
7010 char **error_message)
7011 {
7012 asection * s = 0;
7013 bfd_vma my_offset;
7014 long int ret_offset;
7015 struct elf_link_hash_entry * myh;
7016 struct elf32_arm_link_hash_table * globals;
7017
7018 myh = find_thumb_glue (info, name, error_message);
7019 if (myh == NULL)
7020 return FALSE;
7021
7022 globals = elf32_arm_hash_table (info);
7023 BFD_ASSERT (globals != NULL);
7024 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7025
7026 my_offset = myh->root.u.def.value;
7027
7028 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7029 THUMB2ARM_GLUE_SECTION_NAME);
7030
7031 BFD_ASSERT (s != NULL);
7032 BFD_ASSERT (s->contents != NULL);
7033 BFD_ASSERT (s->output_section != NULL);
7034
7035 if ((my_offset & 0x01) == 0x01)
7036 {
7037 if (sym_sec != NULL
7038 && sym_sec->owner != NULL
7039 && !INTERWORK_FLAG (sym_sec->owner))
7040 {
7041 (*_bfd_error_handler)
7042 (_("%B(%s): warning: interworking not enabled.\n"
7043 " first occurrence: %B: Thumb call to ARM"),
7044 sym_sec->owner, input_bfd, name);
7045
7046 return FALSE;
7047 }
7048
7049 --my_offset;
7050 myh->root.u.def.value = my_offset;
7051
7052 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7053 s->contents + my_offset);
7054
7055 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7056 s->contents + my_offset + 2);
7057
7058 ret_offset =
7059 /* Address of destination of the stub. */
7060 ((bfd_signed_vma) val)
7061 - ((bfd_signed_vma)
7062 /* Offset from the start of the current section
7063 to the start of the stubs. */
7064 (s->output_offset
7065 /* Offset of the start of this stub from the start of the stubs. */
7066 + my_offset
7067 /* Address of the start of the current section. */
7068 + s->output_section->vma)
7069 /* The branch instruction is 4 bytes into the stub. */
7070 + 4
7071 /* ARM branches work from the pc of the instruction + 8. */
7072 + 8);
7073
7074 put_arm_insn (globals, output_bfd,
7075 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7076 s->contents + my_offset + 4);
7077 }
7078
7079 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7080
7081 /* Now go back and fix up the original BL insn to point to here. */
7082 ret_offset =
7083 /* Address of where the stub is located. */
7084 (s->output_section->vma + s->output_offset + my_offset)
7085 /* Address of where the BL is located. */
7086 - (input_section->output_section->vma + input_section->output_offset
7087 + offset)
7088 /* Addend in the relocation. */
7089 - addend
7090 /* Biassing for PC-relative addressing. */
7091 - 8;
7092
7093 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7094
7095 return TRUE;
7096 }
7097
7098 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7099
7100 static struct elf_link_hash_entry *
7101 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7102 const char * name,
7103 bfd * input_bfd,
7104 bfd * output_bfd,
7105 asection * sym_sec,
7106 bfd_vma val,
7107 asection * s,
7108 char ** error_message)
7109 {
7110 bfd_vma my_offset;
7111 long int ret_offset;
7112 struct elf_link_hash_entry * myh;
7113 struct elf32_arm_link_hash_table * globals;
7114
7115 myh = find_arm_glue (info, name, error_message);
7116 if (myh == NULL)
7117 return NULL;
7118
7119 globals = elf32_arm_hash_table (info);
7120 BFD_ASSERT (globals != NULL);
7121 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7122
7123 my_offset = myh->root.u.def.value;
7124
7125 if ((my_offset & 0x01) == 0x01)
7126 {
7127 if (sym_sec != NULL
7128 && sym_sec->owner != NULL
7129 && !INTERWORK_FLAG (sym_sec->owner))
7130 {
7131 (*_bfd_error_handler)
7132 (_("%B(%s): warning: interworking not enabled.\n"
7133 " first occurrence: %B: arm call to thumb"),
7134 sym_sec->owner, input_bfd, name);
7135 }
7136
7137 --my_offset;
7138 myh->root.u.def.value = my_offset;
7139
7140 if (info->shared || globals->root.is_relocatable_executable
7141 || globals->pic_veneer)
7142 {
7143 /* For relocatable objects we can't use absolute addresses,
7144 so construct the address from a relative offset. */
7145 /* TODO: If the offset is small it's probably worth
7146 constructing the address with adds. */
7147 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7148 s->contents + my_offset);
7149 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7150 s->contents + my_offset + 4);
7151 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7152 s->contents + my_offset + 8);
7153 /* Adjust the offset by 4 for the position of the add,
7154 and 8 for the pipeline offset. */
7155 ret_offset = (val - (s->output_offset
7156 + s->output_section->vma
7157 + my_offset + 12))
7158 | 1;
7159 bfd_put_32 (output_bfd, ret_offset,
7160 s->contents + my_offset + 12);
7161 }
7162 else if (globals->use_blx)
7163 {
7164 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7165 s->contents + my_offset);
7166
7167 /* It's a thumb address. Add the low order bit. */
7168 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7169 s->contents + my_offset + 4);
7170 }
7171 else
7172 {
7173 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7174 s->contents + my_offset);
7175
7176 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7177 s->contents + my_offset + 4);
7178
7179 /* It's a thumb address. Add the low order bit. */
7180 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7181 s->contents + my_offset + 8);
7182
7183 my_offset += 12;
7184 }
7185 }
7186
7187 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7188
7189 return myh;
7190 }
7191
7192 /* Arm code calling a Thumb function. */
7193
7194 static int
7195 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7196 const char * name,
7197 bfd * input_bfd,
7198 bfd * output_bfd,
7199 asection * input_section,
7200 bfd_byte * hit_data,
7201 asection * sym_sec,
7202 bfd_vma offset,
7203 bfd_signed_vma addend,
7204 bfd_vma val,
7205 char **error_message)
7206 {
7207 unsigned long int tmp;
7208 bfd_vma my_offset;
7209 asection * s;
7210 long int ret_offset;
7211 struct elf_link_hash_entry * myh;
7212 struct elf32_arm_link_hash_table * globals;
7213
7214 globals = elf32_arm_hash_table (info);
7215 BFD_ASSERT (globals != NULL);
7216 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7217
7218 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7219 ARM2THUMB_GLUE_SECTION_NAME);
7220 BFD_ASSERT (s != NULL);
7221 BFD_ASSERT (s->contents != NULL);
7222 BFD_ASSERT (s->output_section != NULL);
7223
7224 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7225 sym_sec, val, s, error_message);
7226 if (!myh)
7227 return FALSE;
7228
7229 my_offset = myh->root.u.def.value;
7230 tmp = bfd_get_32 (input_bfd, hit_data);
7231 tmp = tmp & 0xFF000000;
7232
7233 /* The B/BL offset is relative to the ARM PC, which reads 8 bytes beyond the instruction being relocated, so subtract 8. */
7234 ret_offset = (s->output_offset
7235 + my_offset
7236 + s->output_section->vma
7237 - (input_section->output_offset
7238 + input_section->output_section->vma
7239 + offset + addend)
7240 - 8);
7241
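/* Insert the 24-bit signed word offset into the low bits of the branch
   instruction; the condition code and opcode bits were preserved in TMP
   above.  */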
7242 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7243
7244 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7245
7246 return TRUE;
7247 }
7248
7249 /* Populate Arm stub for an exported Thumb function. */
7250
7251 static bfd_boolean
7252 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7253 {
7254 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7255 asection * s;
7256 struct elf_link_hash_entry * myh;
7257 struct elf32_arm_link_hash_entry *eh;
7258 struct elf32_arm_link_hash_table * globals;
7259 asection *sec;
7260 bfd_vma val;
7261 char *error_message;
7262
7263 eh = elf32_arm_hash_entry (h);
7264 /* Allocate stubs for exported Thumb functions on v4t. */
7265 if (eh->export_glue == NULL)
7266 return TRUE;
7267
7268 globals = elf32_arm_hash_table (info);
7269 BFD_ASSERT (globals != NULL);
7270 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7271
7272 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7273 ARM2THUMB_GLUE_SECTION_NAME);
7274 BFD_ASSERT (s != NULL);
7275 BFD_ASSERT (s->contents != NULL);
7276 BFD_ASSERT (s->output_section != NULL);
7277
7278 sec = eh->export_glue->root.u.def.section;
7279
7280 BFD_ASSERT (sec->output_section != NULL);
7281
7282 val = eh->export_glue->root.u.def.value + sec->output_offset
7283 + sec->output_section->vma;
7284
7285 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7286 h->root.u.def.section->owner,
7287 globals->obfd, sec, val, s,
7288 &error_message);
7289 BFD_ASSERT (myh);
7290 return TRUE;
7291 }
7292
7293 /* Populate ARMv4 BX veneers. Return the absolute address of the veneer. */
7294
7295 static bfd_vma
7296 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7297 {
7298 bfd_byte *p;
7299 bfd_vma glue_addr;
7300 asection *s;
7301 struct elf32_arm_link_hash_table *globals;
7302
7303 globals = elf32_arm_hash_table (info);
7304 BFD_ASSERT (globals != NULL);
7305 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7306
7307 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7308 ARM_BX_GLUE_SECTION_NAME);
7309 BFD_ASSERT (s != NULL);
7310 BFD_ASSERT (s->contents != NULL);
7311 BFD_ASSERT (s->output_section != NULL);
7312
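/* Bit 1 of bx_glue_offset[reg] records that a veneer is needed for this
   register; bit 0 is set below once the veneer has actually been written
   out.  */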
7313 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7314
7315 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7316
7317 if ((globals->bx_glue_offset[reg] & 1) == 0)
7318 {
7319 p = s->contents + glue_addr;
7320 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7321 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7322 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7323 globals->bx_glue_offset[reg] |= 1;
7324 }
7325
7326 return glue_addr + s->output_section->vma + s->output_offset;
7327 }
7328
7329 /* Generate Arm stubs for exported Thumb symbols. */
7330 static void
7331 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7332 struct bfd_link_info *link_info)
7333 {
7334 struct elf32_arm_link_hash_table * globals;
7335
7336 if (link_info == NULL)
7337 /* Ignore this if we are not called by the ELF backend linker. */
7338 return;
7339
7340 globals = elf32_arm_hash_table (link_info);
7341 if (globals == NULL)
7342 return;
7343
7344 /* If blx is available then exported Thumb symbols are OK and there is
7345 nothing to do. */
7346 if (globals->use_blx)
7347 return;
7348
7349 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7350 link_info);
7351 }
7352
7353 /* Reserve space for COUNT dynamic relocations in the relocation section
7354 SRELOC. */
7355
7356 static void
7357 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7358 bfd_size_type count)
7359 {
7360 struct elf32_arm_link_hash_table *htab;
7361
7362 htab = elf32_arm_hash_table (info);
7363 BFD_ASSERT (htab->root.dynamic_sections_created);
7364 if (sreloc == NULL)
7365 abort ();
7366 sreloc->size += RELOC_SIZE (htab) * count;
7367 }
7368
7369 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7370 dynamic, the relocations should go in SRELOC, otherwise they should
7371 go in the special .rel.iplt section. */
7372
7373 static void
7374 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7375 bfd_size_type count)
7376 {
7377 struct elf32_arm_link_hash_table *htab;
7378
7379 htab = elf32_arm_hash_table (info);
7380 if (!htab->root.dynamic_sections_created)
7381 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7382 else
7383 {
7384 BFD_ASSERT (sreloc != NULL);
7385 sreloc->size += RELOC_SIZE (htab) * count;
7386 }
7387 }
7388
7389 /* Add relocation REL to the end of relocation section SRELOC. */
7390
7391 static void
7392 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7393 asection *sreloc, Elf_Internal_Rela *rel)
7394 {
7395 bfd_byte *loc;
7396 struct elf32_arm_link_hash_table *htab;
7397
7398 htab = elf32_arm_hash_table (info);
7399 if (!htab->root.dynamic_sections_created
7400 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7401 sreloc = htab->root.irelplt;
7402 if (sreloc == NULL)
7403 abort ();
7404 loc = sreloc->contents;
7405 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7406 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7407 abort ();
7408 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7409 }
7410
7411 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7412 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7413 to .plt. */
7414
7415 static void
7416 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7417 bfd_boolean is_iplt_entry,
7418 union gotplt_union *root_plt,
7419 struct arm_plt_info *arm_plt)
7420 {
7421 struct elf32_arm_link_hash_table *htab;
7422 asection *splt;
7423 asection *sgotplt;
7424
7425 htab = elf32_arm_hash_table (info);
7426
7427 if (is_iplt_entry)
7428 {
7429 splt = htab->root.iplt;
7430 sgotplt = htab->root.igotplt;
7431
7432 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7433 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7434 }
7435 else
7436 {
7437 splt = htab->root.splt;
7438 sgotplt = htab->root.sgotplt;
7439
7440 /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt. */
7441 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7442
7443 /* If this is the first .plt entry, make room for the special
7444 first entry. */
7445 if (splt->size == 0)
7446 splt->size += htab->plt_header_size;
7447 }
7448
7449 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7450 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7451 splt->size += PLT_THUMB_STUB_SIZE;
7452 root_plt->offset = splt->size;
7453 splt->size += htab->plt_entry_size;
7454
7455 if (!htab->symbian_p)
7456 {
7457 /* We also need to make an entry in the .got.plt section, which
7458 will be placed in the .got section by the linker script. */
7459 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7460 sgotplt->size += 4;
7461 }
7462 }
7463
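/* Encode the low 16 bits of VALUE as a MOVW immediate: bits 0-11 go into
   the instruction's imm12 field and bits 12-15 into its imm4 field
   (instruction bits 16-19).  */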
7464 static bfd_vma
7465 arm_movw_immediate (bfd_vma value)
7466 {
7467 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7468 }
7469
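/* Likewise for MOVT: encode bits 16-31 of VALUE into the imm4:imm12
   fields.  */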
7470 static bfd_vma
7471 arm_movt_immediate (bfd_vma value)
7472 {
7473 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
7474 }
7475
7476 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7477 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7478 Otherwise, DYNINDX is the index of the symbol in the dynamic
7479 symbol table and SYM_VALUE is undefined.
7480
7481 ROOT_PLT points to the offset of the PLT entry from the start of its
7482 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7483 bookkeeping information. */
7484
7485 static void
7486 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7487 union gotplt_union *root_plt,
7488 struct arm_plt_info *arm_plt,
7489 int dynindx, bfd_vma sym_value)
7490 {
7491 struct elf32_arm_link_hash_table *htab;
7492 asection *sgot;
7493 asection *splt;
7494 asection *srel;
7495 bfd_byte *loc;
7496 bfd_vma plt_index;
7497 Elf_Internal_Rela rel;
7498 bfd_vma plt_header_size;
7499 bfd_vma got_header_size;
7500
7501 htab = elf32_arm_hash_table (info);
7502
7503 /* Pick the appropriate sections and sizes. */
7504 if (dynindx == -1)
7505 {
7506 splt = htab->root.iplt;
7507 sgot = htab->root.igotplt;
7508 srel = htab->root.irelplt;
7509
7510 /* There are no reserved entries in .igot.plt, and no special
7511 first entry in .iplt. */
7512 got_header_size = 0;
7513 plt_header_size = 0;
7514 }
7515 else
7516 {
7517 splt = htab->root.splt;
7518 sgot = htab->root.sgotplt;
7519 srel = htab->root.srelplt;
7520
7521 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7522 plt_header_size = htab->plt_header_size;
7523 }
7524 BFD_ASSERT (splt != NULL && srel != NULL);
7525
7526 /* Fill in the entry in the procedure linkage table. */
7527 if (htab->symbian_p)
7528 {
7529 BFD_ASSERT (dynindx >= 0);
7530 put_arm_insn (htab, output_bfd,
7531 elf32_arm_symbian_plt_entry[0],
7532 splt->contents + root_plt->offset);
7533 bfd_put_32 (output_bfd,
7534 elf32_arm_symbian_plt_entry[1],
7535 splt->contents + root_plt->offset + 4);
7536
7537 /* Fill in the entry in the .rel.plt section. */
7538 rel.r_offset = (splt->output_section->vma
7539 + splt->output_offset
7540 + root_plt->offset + 4);
7541 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7542
7543 /* Get the index in the procedure linkage table which
7544 corresponds to this symbol. This is the index of this symbol
7545 in all the symbols for which we are making plt entries. The
7546 first entry in the procedure linkage table is reserved. */
7547 plt_index = ((root_plt->offset - plt_header_size)
7548 / htab->plt_entry_size);
7549 }
7550 else
7551 {
7552 bfd_vma got_offset, got_address, plt_address;
7553 bfd_vma got_displacement, initial_got_entry;
7554 bfd_byte * ptr;
7555
7556 BFD_ASSERT (sgot != NULL);
7557
7558 /* Get the offset into the .(i)got.plt table of the entry that
7559 corresponds to this function. */
7560 got_offset = (arm_plt->got_offset & -2);
7561
7562 /* Get the index in the procedure linkage table which
7563 corresponds to this symbol. This is the index of this symbol
7564 in all the symbols for which we are making plt entries.
7565 After the reserved .got.plt entries, all symbols appear in
7566 the same order as in .plt. */
7567 plt_index = (got_offset - got_header_size) / 4;
7568
7569 /* Calculate the address of the GOT entry. */
7570 got_address = (sgot->output_section->vma
7571 + sgot->output_offset
7572 + got_offset);
7573
7574 /* ...and the address of the PLT entry. */
7575 plt_address = (splt->output_section->vma
7576 + splt->output_offset
7577 + root_plt->offset);
7578
7579 ptr = splt->contents + root_plt->offset;
7580 if (htab->vxworks_p && info->shared)
7581 {
7582 unsigned int i;
7583 bfd_vma val;
7584
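/* Fill in the template one word at a time.  Words 2 and 5 hold data
   (the GOT entry's offset within its output section and this entry's
   offset into .rel.plt) and so use bfd_put_32; the remaining words are
   instructions.  */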
7585 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7586 {
7587 val = elf32_arm_vxworks_shared_plt_entry[i];
7588 if (i == 2)
7589 val |= got_address - sgot->output_section->vma;
7590 if (i == 5)
7591 val |= plt_index * RELOC_SIZE (htab);
7592 if (i == 2 || i == 5)
7593 bfd_put_32 (output_bfd, val, ptr);
7594 else
7595 put_arm_insn (htab, output_bfd, val, ptr);
7596 }
7597 }
7598 else if (htab->vxworks_p)
7599 {
7600 unsigned int i;
7601 bfd_vma val;
7602
7603 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7604 {
7605 val = elf32_arm_vxworks_exec_plt_entry[i];
7606 if (i == 2)
7607 val |= got_address;
7608 if (i == 4)
7609 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7610 if (i == 5)
7611 val |= plt_index * RELOC_SIZE (htab);
7612 if (i == 2 || i == 5)
7613 bfd_put_32 (output_bfd, val, ptr);
7614 else
7615 put_arm_insn (htab, output_bfd, val, ptr);
7616 }
7617
7618 loc = (htab->srelplt2->contents
7619 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7620
7621 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7622 referencing the GOT for this PLT entry. */
7623 rel.r_offset = plt_address + 8;
7624 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7625 rel.r_addend = got_offset;
7626 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7627 loc += RELOC_SIZE (htab);
7628
7629 /* Create the R_ARM_ABS32 relocation referencing the
7630 beginning of the PLT for this GOT entry. */
7631 rel.r_offset = got_address;
7632 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7633 rel.r_addend = 0;
7634 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7635 }
7636 else if (htab->nacl_p)
7637 {
7638 /* Calculate the displacement between the PLT slot and the
7639 common tail that's part of the special initial PLT slot. */
7640 int32_t tail_displacement
7641 = ((splt->output_section->vma + splt->output_offset
7642 + ARM_NACL_PLT_TAIL_OFFSET)
7643 - (plt_address + htab->plt_entry_size + 4));
7644 BFD_ASSERT ((tail_displacement & 3) == 0);
7645 tail_displacement >>= 2;
7646
7647 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7648 || (-tail_displacement & 0xff000000) == 0);
7649
7650 /* Calculate the displacement between the PLT slot and the entry
7651 in the GOT. The offset accounts for the value produced by
7652 adding to pc in the penultimate instruction of the PLT stub. */
7653 got_displacement = (got_address
7654 - (plt_address + htab->plt_entry_size));
7655
7656 /* NaCl does not support interworking at all. */
7657 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7658
7659 put_arm_insn (htab, output_bfd,
7660 elf32_arm_nacl_plt_entry[0]
7661 | arm_movw_immediate (got_displacement),
7662 ptr + 0);
7663 put_arm_insn (htab, output_bfd,
7664 elf32_arm_nacl_plt_entry[1]
7665 | arm_movt_immediate (got_displacement),
7666 ptr + 4);
7667 put_arm_insn (htab, output_bfd,
7668 elf32_arm_nacl_plt_entry[2],
7669 ptr + 8);
7670 put_arm_insn (htab, output_bfd,
7671 elf32_arm_nacl_plt_entry[3]
7672 | (tail_displacement & 0x00ffffff),
7673 ptr + 12);
7674 }
7675 else
7676 {
7677 /* Calculate the displacement between the PLT slot and the
7678 entry in the GOT. The eight-byte offset accounts for the
7679 value produced by adding to pc in the first instruction
7680 of the PLT stub. */
7681 got_displacement = got_address - (plt_address + 8);
7682
7683 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7684
7685 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7686 {
7687 put_thumb_insn (htab, output_bfd,
7688 elf32_arm_plt_thumb_stub[0], ptr - 4);
7689 put_thumb_insn (htab, output_bfd,
7690 elf32_arm_plt_thumb_stub[1], ptr - 2);
7691 }
7692
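/* A sketch of the resulting entry, assuming the usual three-word
   template:  add ip, pc, #G & 0x0ff00000;  add ip, ip, #G & 0x000ff000;
   ldr pc, [ip, #G & 0x00000fff]!  -- where G is the displacement
   computed above.  */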
7693 put_arm_insn (htab, output_bfd,
7694 elf32_arm_plt_entry[0]
7695 | ((got_displacement & 0x0ff00000) >> 20),
7696 ptr + 0);
7697 put_arm_insn (htab, output_bfd,
7698 elf32_arm_plt_entry[1]
7699 | ((got_displacement & 0x000ff000) >> 12),
7700 ptr + 4);
7701 put_arm_insn (htab, output_bfd,
7702 elf32_arm_plt_entry[2]
7703 | (got_displacement & 0x00000fff),
7704 ptr + 8);
7705 #ifdef FOUR_WORD_PLT
7706 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
7707 #endif
7708 }
7709
7710 /* Fill in the entry in the .rel(a).(i)plt section. */
7711 rel.r_offset = got_address;
7712 rel.r_addend = 0;
7713 if (dynindx == -1)
7714 {
7715 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7716 The dynamic linker or static executable then calls SYM_VALUE
7717 to determine the correct run-time value of the .igot.plt entry. */
7718 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7719 initial_got_entry = sym_value;
7720 }
7721 else
7722 {
7723 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7724 initial_got_entry = (splt->output_section->vma
7725 + splt->output_offset);
7726 }
7727
7728 /* Fill in the entry in the global offset table. */
7729 bfd_put_32 (output_bfd, initial_got_entry,
7730 sgot->contents + got_offset);
7731 }
7732
7733 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7734 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7735 }
7736
7737 /* Some relocations map to different relocations depending on the
7738 target. Return the real relocation. */
7739
7740 static int
7741 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7742 int r_type)
7743 {
7744 switch (r_type)
7745 {
7746 case R_ARM_TARGET1:
7747 if (globals->target1_is_rel)
7748 return R_ARM_REL32;
7749 else
7750 return R_ARM_ABS32;
7751
7752 case R_ARM_TARGET2:
7753 return globals->target2_reloc;
7754
7755 default:
7756 return r_type;
7757 }
7758 }
7759
7760 /* Return the base VMA address which should be subtracted from real addresses
7761 when resolving @dtpoff relocation.
7762 This is PT_TLS segment p_vaddr. */
7763
7764 static bfd_vma
7765 dtpoff_base (struct bfd_link_info *info)
7766 {
7767 /* If tls_sec is NULL, we should have signalled an error already. */
7768 if (elf_hash_table (info)->tls_sec == NULL)
7769 return 0;
7770 return elf_hash_table (info)->tls_sec->vma;
7771 }
7772
7773 /* Return the relocation value for @tpoff relocation
7774 if STT_TLS virtual address is ADDRESS. */
7775
7776 static bfd_vma
7777 tpoff (struct bfd_link_info *info, bfd_vma address)
7778 {
7779 struct elf_link_hash_table *htab = elf_hash_table (info);
7780 bfd_vma base;
7781
7782 /* If tls_sec is NULL, we should have signalled an error already. */
7783 if (htab->tls_sec == NULL)
7784 return 0;
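/* ARM follows TLS variant 1: the thread pointer addresses the thread
   control block and the static TLS block follows it, so the offset
   starts at TCB_SIZE rounded up to the TLS segment's alignment.  */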
7785 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7786 return address - htab->tls_sec->vma + base;
7787 }
7788
7789 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7790 VALUE is the relocation value. */
7791
7792 static bfd_reloc_status_type
7793 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7794 {
7795 if (value > 0xfff)
7796 return bfd_reloc_overflow;
7797
7798 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7799 bfd_put_32 (abfd, value, data);
7800 return bfd_reloc_ok;
7801 }
7802
7803 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7804 R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7805 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7806
7807 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7808 is to then call final_link_relocate. Return other values in the
7809 case of error.
7810
7811 FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7812 the pre-relaxed code. It would be nice if the relocs were updated
7813 to match the optimization. */
7814
7815 static bfd_reloc_status_type
7816 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7817 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7818 Elf_Internal_Rela *rel, unsigned long is_local)
7819 {
7820 unsigned long insn;
7821
7822 switch (ELF32_R_TYPE (rel->r_info))
7823 {
7824 default:
7825 return bfd_reloc_notsupported;
7826
7827 case R_ARM_TLS_GOTDESC:
7828 if (is_local)
7829 insn = 0;
7830 else
7831 {
7832 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7833 if (insn & 1)
7834 insn -= 5; /* THUMB */
7835 else
7836 insn -= 8; /* ARM */
7837 }
7838 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7839 return bfd_reloc_continue;
7840
7841 case R_ARM_THM_TLS_DESCSEQ:
7842 /* Thumb insn. */
7843 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
7844 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
7845 {
7846 if (is_local)
7847 /* nop */
7848 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7849 }
7850 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
7851 {
7852 if (is_local)
7853 /* nop */
7854 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7855 else
7856 /* ldr rx,[ry] */
7857 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
7858 }
7859 else if ((insn & 0xff87) == 0x4780) /* blx rx */
7860 {
7861 if (is_local)
7862 /* nop */
7863 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
7864 else
7865 /* mov r0, rx */
7866 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
7867 contents + rel->r_offset);
7868 }
7869 else
7870 {
7871 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
7872 /* It's a 32-bit instruction; fetch the rest of it for
7873 error generation. */
7874 insn = (insn << 16)
7875 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
7876 (*_bfd_error_handler)
7877 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
7878 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7879 return bfd_reloc_notsupported;
7880 }
7881 break;
7882
7883 case R_ARM_TLS_DESCSEQ:
7884 /* arm insn. */
7885 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7886 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
7887 {
7888 if (is_local)
7889 /* mov rx, ry */
7890 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
7891 contents + rel->r_offset);
7892 }
7893 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
7894 {
7895 if (is_local)
7896 /* nop */
7897 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7898 else
7899 /* ldr rx,[ry] */
7900 bfd_put_32 (input_bfd, insn & 0xfffff000,
7901 contents + rel->r_offset);
7902 }
7903 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
7904 {
7905 if (is_local)
7906 /* nop */
7907 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
7908 else
7909 /* mov r0, rx */
7910 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
7911 contents + rel->r_offset);
7912 }
7913 else
7914 {
7915 (*_bfd_error_handler)
7916 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
7917 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
7918 return bfd_reloc_notsupported;
7919 }
7920 break;
7921
7922 case R_ARM_TLS_CALL:
7923 /* GD->IE relaxation, turn the instruction into 'nop' or
7924 'ldr r0, [pc,r0]' */
7925 insn = is_local ? 0xe1a00000 : 0xe79f0000;
7926 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
7927 break;
7928
7929 case R_ARM_THM_TLS_CALL:
7930 /* GD->IE relaxation */
7931 if (!is_local)
7932 /* add r0,pc; ldr r0, [r0] */
7933 insn = 0x44786800;
7934 else if (arch_has_thumb2_nop (globals))
7935 /* nop.w */
7936 insn = 0xf3af8000;
7937 else
7938 /* nop; nop */
7939 insn = 0xbf00bf00;
7940
7941 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
7942 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
7943 break;
7944 }
7945 return bfd_reloc_ok;
7946 }
7947
7948 /* For a given value of n, calculate the value of G_n as required to
7949 deal with group relocations. We return it in the form of an
7950 encoded constant-and-rotation, together with the final residual. If n is
7951 specified as less than zero, then final_residual is filled with the
7952 input value and no further action is performed. */
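/* For example, for VALUE 0x1234 and N 0 this yields G_0 = 0x1200, returned
   in encoded form as 0xd48 (constant 0x48 with a rotation of 26), with a
   final residual of 0x34.  */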
7953
7954 static bfd_vma
7955 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
7956 {
7957 int current_n;
7958 bfd_vma g_n;
7959 bfd_vma encoded_g_n = 0;
7960 bfd_vma residual = value; /* Also known as Y_n. */
7961
7962 for (current_n = 0; current_n <= n; current_n++)
7963 {
7964 int shift;
7965
7966 /* Calculate which part of the value to mask. */
7967 if (residual == 0)
7968 shift = 0;
7969 else
7970 {
7971 int msb;
7972
7973 /* Determine the most significant bit in the residual and
7974 align the resulting value to a 2-bit boundary. */
7975 for (msb = 30; msb >= 0; msb -= 2)
7976 if (residual & (3 << msb))
7977 break;
7978
7979 /* The desired shift is now (msb - 6), or zero, whichever
7980 is the greater. */
7981 shift = msb - 6;
7982 if (shift < 0)
7983 shift = 0;
7984 }
7985
7986 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
7987 g_n = residual & (0xff << shift);
7988 encoded_g_n = (g_n >> shift)
7989 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
7990
7991 /* Calculate the residual for the next time around. */
7992 residual &= ~g_n;
7993 }
7994
7995 *final_residual = residual;
7996
7997 return encoded_g_n;
7998 }
7999
8000 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8001 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8002
8003 static int
8004 identify_add_or_sub (bfd_vma insn)
8005 {
8006 int opcode = insn & 0x1e00000;
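/* Bits 21-24 of an ARM data-processing instruction hold the opcode:
   0100 (ADD) appears in this mask as 1 << 23 and 0010 (SUB) as 1 << 22.  */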
8007
8008 if (opcode == 1 << 23) /* ADD */
8009 return 1;
8010
8011 if (opcode == 1 << 22) /* SUB */
8012 return -1;
8013
8014 return 0;
8015 }
8016
8017 /* Perform a relocation as part of a final link. */
8018
8019 static bfd_reloc_status_type
8020 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8021 bfd * input_bfd,
8022 bfd * output_bfd,
8023 asection * input_section,
8024 bfd_byte * contents,
8025 Elf_Internal_Rela * rel,
8026 bfd_vma value,
8027 struct bfd_link_info * info,
8028 asection * sym_sec,
8029 const char * sym_name,
8030 unsigned char st_type,
8031 enum arm_st_branch_type branch_type,
8032 struct elf_link_hash_entry * h,
8033 bfd_boolean * unresolved_reloc_p,
8034 char ** error_message)
8035 {
8036 unsigned long r_type = howto->type;
8037 unsigned long r_symndx;
8038 bfd_byte * hit_data = contents + rel->r_offset;
8039 bfd_vma * local_got_offsets;
8040 bfd_vma * local_tlsdesc_gotents;
8041 asection * sgot;
8042 asection * splt;
8043 asection * sreloc = NULL;
8044 asection * srelgot;
8045 bfd_vma addend;
8046 bfd_signed_vma signed_addend;
8047 unsigned char dynreloc_st_type;
8048 bfd_vma dynreloc_value;
8049 struct elf32_arm_link_hash_table * globals;
8050 struct elf32_arm_link_hash_entry *eh;
8051 union gotplt_union *root_plt;
8052 struct arm_plt_info *arm_plt;
8053 bfd_vma plt_offset;
8054 bfd_vma gotplt_offset;
8055 bfd_boolean has_iplt_entry;
8056
8057 globals = elf32_arm_hash_table (info);
8058 if (globals == NULL)
8059 return bfd_reloc_notsupported;
8060
8061 BFD_ASSERT (is_arm_elf (input_bfd));
8062
8063 /* Some relocation types map to different relocations depending on the
8064 target. We pick the right one here. */
8065 r_type = arm_real_reloc_type (globals, r_type);
8066
8067 /* It is possible to have linker relaxations on some TLS access
8068 models. Update our information here. */
8069 r_type = elf32_arm_tls_transition (info, r_type, h);
8070
8071 if (r_type != howto->type)
8072 howto = elf32_arm_howto_from_type (r_type);
8073
8074 /* If the start address has been set, then set the EF_ARM_HASENTRY
8075 flag. Setting this more than once is redundant, but the cost is
8076 not too high, and it keeps the code simple.
8077
8078 The test is done here, rather than somewhere else, because the
8079 start address is only set just before the final link commences.
8080
8081 Note - if the user deliberately sets a start address of 0, the
8082 flag will not be set. */
8083 if (bfd_get_start_address (output_bfd) != 0)
8084 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8085
8086 eh = (struct elf32_arm_link_hash_entry *) h;
8087 sgot = globals->root.sgot;
8088 local_got_offsets = elf_local_got_offsets (input_bfd);
8089 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8090
8091 if (globals->root.dynamic_sections_created)
8092 srelgot = globals->root.srelgot;
8093 else
8094 srelgot = NULL;
8095
8096 r_symndx = ELF32_R_SYM (rel->r_info);
8097
8098 if (globals->use_rel)
8099 {
8100 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8101
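/* Sign-extend the addend if the top bit of the field covered by
   src_mask is set.  */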
8102 if (addend & ((howto->src_mask + 1) >> 1))
8103 {
8104 signed_addend = -1;
8105 signed_addend &= ~ howto->src_mask;
8106 signed_addend |= addend;
8107 }
8108 else
8109 signed_addend = addend;
8110 }
8111 else
8112 addend = signed_addend = rel->r_addend;
8113
8114 /* Record the symbol information that should be used in dynamic
8115 relocations. */
8116 dynreloc_st_type = st_type;
8117 dynreloc_value = value;
8118 if (branch_type == ST_BRANCH_TO_THUMB)
8119 dynreloc_value |= 1;
8120
8121 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8122 VALUE appropriately for relocations that we resolve at link time. */
8123 has_iplt_entry = FALSE;
8124 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8125 && root_plt->offset != (bfd_vma) -1)
8126 {
8127 plt_offset = root_plt->offset;
8128 gotplt_offset = arm_plt->got_offset;
8129
8130 if (h == NULL || eh->is_iplt)
8131 {
8132 has_iplt_entry = TRUE;
8133 splt = globals->root.iplt;
8134
8135 /* Populate .iplt entries here, because not all of them will
8136 be seen by finish_dynamic_symbol. The lower bit is set if
8137 we have already populated the entry. */
8138 if (plt_offset & 1)
8139 plt_offset--;
8140 else
8141 {
8142 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8143 -1, dynreloc_value);
8144 root_plt->offset |= 1;
8145 }
8146
8147 /* Static relocations always resolve to the .iplt entry. */
8148 st_type = STT_FUNC;
8149 value = (splt->output_section->vma
8150 + splt->output_offset
8151 + plt_offset);
8152 branch_type = ST_BRANCH_TO_ARM;
8153
8154 /* If there are non-call relocations that resolve to the .iplt
8155 entry, then all dynamic ones must too. */
8156 if (arm_plt->noncall_refcount != 0)
8157 {
8158 dynreloc_st_type = st_type;
8159 dynreloc_value = value;
8160 }
8161 }
8162 else
8163 /* We populate the .plt entry in finish_dynamic_symbol. */
8164 splt = globals->root.splt;
8165 }
8166 else
8167 {
8168 splt = NULL;
8169 plt_offset = (bfd_vma) -1;
8170 gotplt_offset = (bfd_vma) -1;
8171 }
8172
8173 switch (r_type)
8174 {
8175 case R_ARM_NONE:
8176 /* We don't need to find a value for this symbol. It's just a
8177 marker. */
8178 *unresolved_reloc_p = FALSE;
8179 return bfd_reloc_ok;
8180
8181 case R_ARM_ABS12:
8182 if (!globals->vxworks_p)
8183 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
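/* Fall through.  */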
8184
8185 case R_ARM_PC24:
8186 case R_ARM_ABS32:
8187 case R_ARM_ABS32_NOI:
8188 case R_ARM_REL32:
8189 case R_ARM_REL32_NOI:
8190 case R_ARM_CALL:
8191 case R_ARM_JUMP24:
8192 case R_ARM_XPC25:
8193 case R_ARM_PREL31:
8194 case R_ARM_PLT32:
8195 /* Handle relocations which should use the PLT entry. ABS32/REL32
8196 will use the symbol's value, which may point to a PLT entry, but we
8197 don't need to handle that here. If we created a PLT entry, all
8198 branches in this object should go to it, except if the PLT is too
8199 far away, in which case a long branch stub should be inserted. */
8200 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8201 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8202 && r_type != R_ARM_CALL
8203 && r_type != R_ARM_JUMP24
8204 && r_type != R_ARM_PLT32)
8205 && plt_offset != (bfd_vma) -1)
8206 {
8207 /* If we've created a .plt section, and assigned a PLT entry
8208 to this function, it must either be a STT_GNU_IFUNC reference
8209 or not be known to bind locally. In other cases, we should
8210 have cleared the PLT entry by now. */
8211 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8212
8213 value = (splt->output_section->vma
8214 + splt->output_offset
8215 + plt_offset);
8216 *unresolved_reloc_p = FALSE;
8217 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8218 contents, rel->r_offset, value,
8219 rel->r_addend);
8220 }
8221
8222 /* When generating a shared object or relocatable executable, these
8223 relocations are copied into the output file to be resolved at
8224 run time. */
8225 if ((info->shared || globals->root.is_relocatable_executable)
8226 && (input_section->flags & SEC_ALLOC)
8227 && !(globals->vxworks_p
8228 && strcmp (input_section->output_section->name,
8229 ".tls_vars") == 0)
8230 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8231 || !SYMBOL_CALLS_LOCAL (info, h))
8232 && !(input_bfd == globals->stub_bfd
8233 && strstr (input_section->name, STUB_SUFFIX))
8234 && (h == NULL
8235 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8236 || h->root.type != bfd_link_hash_undefweak)
8237 && r_type != R_ARM_PC24
8238 && r_type != R_ARM_CALL
8239 && r_type != R_ARM_JUMP24
8240 && r_type != R_ARM_PREL31
8241 && r_type != R_ARM_PLT32)
8242 {
8243 Elf_Internal_Rela outrel;
8244 bfd_boolean skip, relocate;
8245
8246 *unresolved_reloc_p = FALSE;
8247
8248 if (sreloc == NULL && globals->root.dynamic_sections_created)
8249 {
8250 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8251 ! globals->use_rel);
8252
8253 if (sreloc == NULL)
8254 return bfd_reloc_notsupported;
8255 }
8256
8257 skip = FALSE;
8258 relocate = FALSE;
8259
8260 outrel.r_addend = addend;
8261 outrel.r_offset =
8262 _bfd_elf_section_offset (output_bfd, info, input_section,
8263 rel->r_offset);
8264 if (outrel.r_offset == (bfd_vma) -1)
8265 skip = TRUE;
8266 else if (outrel.r_offset == (bfd_vma) -2)
8267 skip = TRUE, relocate = TRUE;
8268 outrel.r_offset += (input_section->output_section->vma
8269 + input_section->output_offset);
8270
8271 if (skip)
8272 memset (&outrel, 0, sizeof outrel);
8273 else if (h != NULL
8274 && h->dynindx != -1
8275 && (!info->shared
8276 || !info->symbolic
8277 || !h->def_regular))
8278 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8279 else
8280 {
8281 int symbol;
8282
8283 /* This symbol is local, or marked to become local. */
8284 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8285 if (globals->symbian_p)
8286 {
8287 asection *osec;
8288
8289 /* On Symbian OS, the data segment and text segment
8290 can be relocated independently. Therefore, we
8291 must indicate the segment to which this
8292 relocation is relative. The BPABI allows us to
8293 use any symbol in the right segment; we just use
8294 the section symbol as it is convenient. (We
8295 cannot use the symbol given by "h" directly as it
8296 will not appear in the dynamic symbol table.)
8297
8298 Note that the dynamic linker ignores the section
8299 symbol value, so we don't subtract osec->vma
8300 from the emitted reloc addend. */
8301 if (sym_sec)
8302 osec = sym_sec->output_section;
8303 else
8304 osec = input_section->output_section;
8305 symbol = elf_section_data (osec)->dynindx;
8306 if (symbol == 0)
8307 {
8308 struct elf_link_hash_table *htab = elf_hash_table (info);
8309
8310 if ((osec->flags & SEC_READONLY) == 0
8311 && htab->data_index_section != NULL)
8312 osec = htab->data_index_section;
8313 else
8314 osec = htab->text_index_section;
8315 symbol = elf_section_data (osec)->dynindx;
8316 }
8317 BFD_ASSERT (symbol != 0);
8318 }
8319 else
8320 /* On SVR4-ish systems, the dynamic loader cannot
8321 relocate the text and data segments independently,
8322 so the symbol does not matter. */
8323 symbol = 0;
8324 if (dynreloc_st_type == STT_GNU_IFUNC)
8325 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8326 to the .iplt entry. Instead, every non-call reference
8327 must use an R_ARM_IRELATIVE relocation to obtain the
8328 correct run-time address. */
8329 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8330 else
8331 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8332 if (globals->use_rel)
8333 relocate = TRUE;
8334 else
8335 outrel.r_addend += dynreloc_value;
8336 }
8337
8338 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8339
8340 /* If this reloc is against an external symbol, we do not want to
8341 fiddle with the addend. Otherwise, we need to include the symbol
8342 value so that it becomes an addend for the dynamic reloc. */
8343 if (! relocate)
8344 return bfd_reloc_ok;
8345
8346 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8347 contents, rel->r_offset,
8348 dynreloc_value, (bfd_vma) 0);
8349 }
8350 else switch (r_type)
8351 {
8352 case R_ARM_ABS12:
8353 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8354
8355 case R_ARM_XPC25: /* Arm BLX instruction. */
8356 case R_ARM_CALL:
8357 case R_ARM_JUMP24:
8358 case R_ARM_PC24: /* Arm B/BL instruction. */
8359 case R_ARM_PLT32:
8360 {
8361 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8362
8363 if (r_type == R_ARM_XPC25)
8364 {
8365 /* Check for Arm calling Arm function. */
8366 /* FIXME: Should we translate the instruction into a BL
8367 instruction instead ? */
8368 if (branch_type != ST_BRANCH_TO_THUMB)
8369 (*_bfd_error_handler)
8370 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8371 input_bfd,
8372 h ? h->root.root.string : "(local)");
8373 }
8374 else if (r_type == R_ARM_PC24)
8375 {
8376 /* Check for Arm calling Thumb function. */
8377 if (branch_type == ST_BRANCH_TO_THUMB)
8378 {
8379 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8380 output_bfd, input_section,
8381 hit_data, sym_sec, rel->r_offset,
8382 signed_addend, value,
8383 error_message))
8384 return bfd_reloc_ok;
8385 else
8386 return bfd_reloc_dangerous;
8387 }
8388 }
8389
8390 /* Check if a stub has to be inserted because the
8391 destination is too far or we are changing mode. */
8392 if ( r_type == R_ARM_CALL
8393 || r_type == R_ARM_JUMP24
8394 || r_type == R_ARM_PLT32)
8395 {
8396 enum elf32_arm_stub_type stub_type = arm_stub_none;
8397 struct elf32_arm_link_hash_entry *hash;
8398
8399 hash = (struct elf32_arm_link_hash_entry *) h;
8400 stub_type = arm_type_of_stub (info, input_section, rel,
8401 st_type, &branch_type,
8402 hash, value, sym_sec,
8403 input_bfd, sym_name);
8404
8405 if (stub_type != arm_stub_none)
8406 {
8407 /* The target is out of reach, so redirect the
8408 branch to the local stub for this function. */
8409 stub_entry = elf32_arm_get_stub_entry (input_section,
8410 sym_sec, h,
8411 rel, globals,
8412 stub_type);
8413 {
8414 if (stub_entry != NULL)
8415 value = (stub_entry->stub_offset
8416 + stub_entry->stub_sec->output_offset
8417 + stub_entry->stub_sec->output_section->vma);
8418
8419 if (plt_offset != (bfd_vma) -1)
8420 *unresolved_reloc_p = FALSE;
8421 }
8422 }
8423 else
8424 {
8425 /* If the call goes through a PLT entry, make sure to
8426 check distance to the right destination address. */
8427 if (plt_offset != (bfd_vma) -1)
8428 {
8429 value = (splt->output_section->vma
8430 + splt->output_offset
8431 + plt_offset);
8432 *unresolved_reloc_p = FALSE;
8433 /* The PLT entry is in ARM mode, regardless of the
8434 target function. */
8435 branch_type = ST_BRANCH_TO_ARM;
8436 }
8437 }
8438 }
8439
8440 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8441 where:
8442 S is the address of the symbol in the relocation.
8443 P is address of the instruction being relocated.
8444 A is the addend (extracted from the instruction) in bytes.
8445
8446 S is held in 'value'.
8447 P is the base address of the section containing the
8448 instruction plus the offset of the reloc into that
8449 section, ie:
8450 (input_section->output_section->vma +
8451 input_section->output_offset +
8452 rel->r_offset).
8453 A is the addend, converted into bytes, ie:
8454 (signed_addend * 4)
8455
8456 Note: None of these operations have knowledge of the pipeline
8457 size of the processor, thus it is up to the assembler to
8458 encode this information into the addend. */
8459 value -= (input_section->output_section->vma
8460 + input_section->output_offset);
8461 value -= rel->r_offset;
8462 if (globals->use_rel)
8463 value += (signed_addend << howto->size);
8464 else
8465 /* RELA addends do not have to be adjusted by howto->size. */
8466 value += signed_addend;
8467
8468 signed_addend = value;
8469 signed_addend >>= howto->rightshift;
8470
8471 /* A branch to an undefined weak symbol is turned into a jump to
8472 the next instruction unless a PLT entry will be created.
8473 Do the same for local undefined symbols (but not for STN_UNDEF).
8474 The jump to the next instruction is optimized as a NOP depending
8475 on the architecture. */
8476 if (h ? (h->root.type == bfd_link_hash_undefweak
8477 && plt_offset == (bfd_vma) -1)
8478 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8479 {
8480 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8481
8482 if (arch_has_arm_nop (globals))
8483 value |= 0x0320f000;
8484 else
8485 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8486 }
8487 else
8488 {
8489 /* Perform a signed range check. */
8490 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8491 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8492 return bfd_reloc_overflow;
8493
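/* Save bit 1 of the displacement; the branch instruction itself is
   word-aligned, so this equals bit 1 of the target address and supplies
   the H bit of a BLX below.  */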
8494 addend = (value & 2);
8495
8496 value = (signed_addend & howto->dst_mask)
8497 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8498
8499 if (r_type == R_ARM_CALL)
8500 {
8501 /* Set the H bit in the BLX instruction. */
8502 if (branch_type == ST_BRANCH_TO_THUMB)
8503 {
8504 if (addend)
8505 value |= (1 << 24);
8506 else
8507 value &= ~(bfd_vma)(1 << 24);
8508 }
8509
8510 /* Select the correct instruction (BL or BLX). */
8511 /* Only if we are not handling a BL to a stub. In this
8512 case, mode switching is performed by the stub. */
8513 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8514 value |= (1 << 28);
8515 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8516 {
8517 value &= ~(bfd_vma)(1 << 28);
8518 value |= (1 << 24);
8519 }
8520 }
8521 }
8522 }
8523 break;
8524
8525 case R_ARM_ABS32:
8526 value += addend;
8527 if (branch_type == ST_BRANCH_TO_THUMB)
8528 value |= 1;
8529 break;
8530
8531 case R_ARM_ABS32_NOI:
8532 value += addend;
8533 break;
8534
8535 case R_ARM_REL32:
8536 value += addend;
8537 if (branch_type == ST_BRANCH_TO_THUMB)
8538 value |= 1;
8539 value -= (input_section->output_section->vma
8540 + input_section->output_offset + rel->r_offset);
8541 break;
8542
8543 case R_ARM_REL32_NOI:
8544 value += addend;
8545 value -= (input_section->output_section->vma
8546 + input_section->output_offset + rel->r_offset);
8547 break;
8548
8549 case R_ARM_PREL31:
8550 value -= (input_section->output_section->vma
8551 + input_section->output_offset + rel->r_offset);
8552 value += signed_addend;
8553 if (! h || h->root.type != bfd_link_hash_undefweak)
8554 {
8555 /* Check for overflow: bits 30 and 31 must be equal for the value to fit in the signed 31-bit field. */
8556 if ((value ^ (value >> 1)) & (1 << 30))
8557 return bfd_reloc_overflow;
8558 }
8559 value &= 0x7fffffff;
8560 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8561 if (branch_type == ST_BRANCH_TO_THUMB)
8562 value |= 1;
8563 break;
8564 }
8565
8566 bfd_put_32 (input_bfd, value, hit_data);
8567 return bfd_reloc_ok;
8568
8569 case R_ARM_ABS8:
8570 value += addend;
8571
8572 /* There is no way to tell whether the user intended to use a signed or
8573 unsigned addend. When checking for overflow we accept either,
8574 as specified by the AAELF. */
8575 if ((long) value > 0xff || (long) value < -0x80)
8576 return bfd_reloc_overflow;
8577
8578 bfd_put_8 (input_bfd, value, hit_data);
8579 return bfd_reloc_ok;
8580
8581 case R_ARM_ABS16:
8582 value += addend;
8583
8584 /* See comment for R_ARM_ABS8. */
8585 if ((long) value > 0xffff || (long) value < -0x8000)
8586 return bfd_reloc_overflow;
8587
8588 bfd_put_16 (input_bfd, value, hit_data);
8589 return bfd_reloc_ok;
8590
8591 case R_ARM_THM_ABS5:
8592 /* Support ldr and str instructions for the thumb. */
8593 if (globals->use_rel)
8594 {
8595 /* Need to refetch addend. */
8596 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8597 /* ??? Need to determine shift amount from operand size. */
8598 addend >>= howto->rightshift;
8599 }
8600 value += addend;
8601
8602 /* ??? Isn't value unsigned? */
8603 if ((long) value > 0x1f || (long) value < -0x10)
8604 return bfd_reloc_overflow;
8605
8606 /* ??? Value needs to be properly shifted into place first. */
8607 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8608 bfd_put_16 (input_bfd, value, hit_data);
8609 return bfd_reloc_ok;
8610
8611 case R_ARM_THM_ALU_PREL_11_0:
8612 /* Corresponds to: addw reg, pc, #offset (and similarly for subw). */
8613 {
8614 bfd_vma insn;
8615 bfd_signed_vma relocation;
8616
8617 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8618 | bfd_get_16 (input_bfd, hit_data + 2);
8619
8620 if (globals->use_rel)
8621 {
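/* Reassemble the 12-bit immediate from the i:imm3:imm8 fields of the
   ADDW/SUBW encoding; a non-zero opcode field (bits 20-23) indicates
   SUBW, so the addend is negative.  */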
8622 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8623 | ((insn & (1 << 26)) >> 15);
8624 if (insn & 0xf00000)
8625 signed_addend = -signed_addend;
8626 }
8627
8628 relocation = value + signed_addend;
8629 relocation -= Pa (input_section->output_section->vma
8630 + input_section->output_offset
8631 + rel->r_offset);
8632
8633 value = abs (relocation);
8634
8635 if (value >= 0x1000)
8636 return bfd_reloc_overflow;
8637
8638 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8639 | ((value & 0x700) << 4)
8640 | ((value & 0x800) << 15);
8641 if (relocation < 0)
8642 insn |= 0xa00000;
8643
8644 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8645 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8646
8647 return bfd_reloc_ok;
8648 }
8649
8650 case R_ARM_THM_PC8:
8651 /* PR 10073: This reloc is not generated by the GNU toolchain,
8652 but it is supported for compatibility with third party libraries
8653 generated by other compilers, specifically the ARM and IAR toolchains. */
8654 {
8655 bfd_vma insn;
8656 bfd_signed_vma relocation;
8657
8658 insn = bfd_get_16 (input_bfd, hit_data);
8659
8660 if (globals->use_rel)
8661 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
8662
8663 relocation = value + addend;
8664 relocation -= Pa (input_section->output_section->vma
8665 + input_section->output_offset
8666 + rel->r_offset);
8667
8668 value = abs (relocation);
8669
8670 /* We do not check for overflow of this reloc. Although strictly
8671 speaking this is incorrect, it appears to be necessary in order
8672 to work with IAR generated relocs. Since GCC and GAS do not
8673 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8674 a problem for them. */
8675 value &= 0x3fc;
8676
8677 insn = (insn & 0xff00) | (value >> 2);
8678
8679 bfd_put_16 (input_bfd, insn, hit_data);
8680
8681 return bfd_reloc_ok;
8682 }
8683
8684 case R_ARM_THM_PC12:
8685 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8686 {
8687 bfd_vma insn;
8688 bfd_signed_vma relocation;
8689
8690 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8691 | bfd_get_16 (input_bfd, hit_data + 2);
8692
8693 if (globals->use_rel)
8694 {
8695 signed_addend = insn & 0xfff;
8696 if (!(insn & (1 << 23)))
8697 signed_addend = -signed_addend;
8698 }
8699
8700 relocation = value + signed_addend;
8701 relocation -= Pa (input_section->output_section->vma
8702 + input_section->output_offset
8703 + rel->r_offset);
8704
8705 value = abs (relocation);
8706
8707 if (value >= 0x1000)
8708 return bfd_reloc_overflow;
8709
8710 insn = (insn & 0xff7ff000) | value;
8711 if (relocation >= 0)
8712 insn |= (1 << 23);
8713
8714 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8715 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8716
8717 return bfd_reloc_ok;
8718 }
8719
8720 case R_ARM_THM_XPC22:
8721 case R_ARM_THM_CALL:
8722 case R_ARM_THM_JUMP24:
8723 /* Thumb BL (branch long instruction). */
8724 {
8725 bfd_vma relocation;
8726 bfd_vma reloc_sign;
8727 bfd_boolean overflow = FALSE;
8728 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8729 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8730 bfd_signed_vma reloc_signed_max;
8731 bfd_signed_vma reloc_signed_min;
8732 bfd_vma check;
8733 bfd_signed_vma signed_check;
8734 int bitsize;
8735 const int thumb2 = using_thumb2 (globals);
8736
8737 /* A branch to an undefined weak symbol is turned into a jump to
8738 the next instruction unless a PLT entry will be created.
8739 The jump to the next instruction is optimized as a NOP.W for
8740 Thumb-2 enabled architectures. */
8741 if (h && h->root.type == bfd_link_hash_undefweak
8742 && plt_offset == (bfd_vma) -1)
8743 {
8744 if (arch_has_thumb2_nop (globals))
8745 {
8746 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8747 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8748 }
8749 else
8750 {
8751 bfd_put_16 (input_bfd, 0xe000, hit_data);
8752 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8753 }
8754 return bfd_reloc_ok;
8755 }
8756
8757 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8758 with Thumb-1) involving the J1 and J2 bits. */
8759 if (globals->use_rel)
8760 {
8761 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8762 bfd_vma upper = upper_insn & 0x3ff;
8763 bfd_vma lower = lower_insn & 0x7ff;
8764 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8765 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8766 bfd_vma i1 = j1 ^ s ? 0 : 1;
8767 bfd_vma i2 = j2 ^ s ? 0 : 1;
8768
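/* The branch offset is S:I1:I2:imm10:imm11:'0', a 25-bit
   two's-complement value, where I1 = NOT(J1 EOR S) and
   I2 = NOT(J2 EOR S).  */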
8769 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8770 /* Sign extend. */
8771 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8772
8773 signed_addend = addend;
8774 }
8775
8776 if (r_type == R_ARM_THM_XPC22)
8777 {
8778 /* Check for Thumb to Thumb call. */
8779 /* FIXME: Should we translate the instruction into a BL
8780 instruction instead ? */
8781 if (branch_type == ST_BRANCH_TO_THUMB)
8782 (*_bfd_error_handler)
8783 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8784 input_bfd,
8785 h ? h->root.root.string : "(local)");
8786 }
8787 else
8788 {
8789 /* If it is not a call to Thumb, assume call to Arm.
8790 If it is a call relative to a section name, then it is not a
8791 function call at all, but rather a long jump. Calls through
8792 the PLT do not require stubs. */
8793 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8794 {
8795 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8796 {
8797 /* Convert BL to BLX. */
8798 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8799 }
8800 else if (( r_type != R_ARM_THM_CALL)
8801 && (r_type != R_ARM_THM_JUMP24))
8802 {
8803 if (elf32_thumb_to_arm_stub
8804 (info, sym_name, input_bfd, output_bfd, input_section,
8805 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8806 error_message))
8807 return bfd_reloc_ok;
8808 else
8809 return bfd_reloc_dangerous;
8810 }
8811 }
8812 else if (branch_type == ST_BRANCH_TO_THUMB
8813 && globals->use_blx
8814 && r_type == R_ARM_THM_CALL)
8815 {
8816 /* Make sure this is a BL. */
8817 lower_insn |= 0x1800;
8818 }
8819 }
8820
8821 enum elf32_arm_stub_type stub_type = arm_stub_none;
8822 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8823 {
8824 /* Check if a stub has to be inserted because the destination
8825 is too far. */
8826 struct elf32_arm_stub_hash_entry *stub_entry;
8827 struct elf32_arm_link_hash_entry *hash;
8828
8829 hash = (struct elf32_arm_link_hash_entry *) h;
8830
8831 stub_type = arm_type_of_stub (info, input_section, rel,
8832 st_type, &branch_type,
8833 hash, value, sym_sec,
8834 input_bfd, sym_name);
8835
8836 if (stub_type != arm_stub_none)
8837 {
8838 /* The target is out of reach or we are changing modes, so
8839 redirect the branch to the local stub for this
8840 function. */
8841 stub_entry = elf32_arm_get_stub_entry (input_section,
8842 sym_sec, h,
8843 rel, globals,
8844 stub_type);
8845 if (stub_entry != NULL)
8846 {
8847 value = (stub_entry->stub_offset
8848 + stub_entry->stub_sec->output_offset
8849 + stub_entry->stub_sec->output_section->vma);
8850
8851 if (plt_offset != (bfd_vma) -1)
8852 *unresolved_reloc_p = FALSE;
8853 }
8854
8855 /* If this call becomes a call to Arm, force BLX. */
8856 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8857 {
8858 if ((stub_entry
8859 && !arm_stub_is_thumb (stub_entry->stub_type))
8860 || branch_type != ST_BRANCH_TO_THUMB)
8861 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8862 }
8863 }
8864 }
8865
8866 /* Handle calls via the PLT. */
8867 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8868 {
8869 value = (splt->output_section->vma
8870 + splt->output_offset
8871 + plt_offset);
8872
8873 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8874 {
8875 /* If the Thumb BLX instruction is available, convert
8876 the BL to a BLX instruction to call the ARM-mode
8877 PLT entry. */
8878 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8879 branch_type = ST_BRANCH_TO_ARM;
8880 }
8881 else
8882 {
8883 /* Target the Thumb stub before the ARM PLT entry. */
8884 value -= PLT_THUMB_STUB_SIZE;
8885 branch_type = ST_BRANCH_TO_THUMB;
8886 }
8887 *unresolved_reloc_p = FALSE;
8888 }
8889
8890 relocation = value + signed_addend;
8891
8892 relocation -= (input_section->output_section->vma
8893 + input_section->output_offset
8894 + rel->r_offset);
8895
8896 check = relocation >> howto->rightshift;
8897
8898 /* If this is a signed value, the rightshift just dropped
8899 leading 1 bits (assuming two's complement). */
8900 if ((bfd_signed_vma) relocation >= 0)
8901 signed_check = check;
8902 else
8903 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8904
8905 /* Calculate the permissible maximum and minimum values for
8906 this relocation according to whether we're relocating for
8907 Thumb-2 or not. */
8908 bitsize = howto->bitsize;
8909 if (!thumb2)
8910 bitsize -= 2;
8911 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8912 reloc_signed_min = ~reloc_signed_max;
8913
8914 /* Assumes two's complement. */
8915 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8916 overflow = TRUE;
8917
8918 if ((lower_insn & 0x5000) == 0x4000)
8919 /* For a BLX instruction, make sure that the relocation is rounded up
8920 to a word boundary. This follows the semantics of the instruction
8921 which specifies that bit 1 of the target address will come from bit
8922 1 of the base address. */
8923 relocation = (relocation + 2) & ~ 3;
8924
8925 /* Put RELOCATION back into the insn. Assumes two's complement.
8926 We use the Thumb-2 encoding, which is safe even if dealing with
8927 a Thumb-1 instruction by virtue of our overflow check above. */
8928 reloc_sign = (signed_check < 0) ? 1 : 0;
8929 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8930 | ((relocation >> 12) & 0x3ff)
8931 | (reloc_sign << 10);
8932 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8933 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8934 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8935 | ((relocation >> 1) & 0x7ff);
8936
8937 /* Put the relocated value back in the object file: */
8938 bfd_put_16 (input_bfd, upper_insn, hit_data);
8939 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8940
8941 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8942 }
8943 break;
8944
8945 case R_ARM_THM_JUMP19:
8946 /* Thumb32 conditional branch instruction. */
8947 {
8948 bfd_vma relocation;
8949 bfd_boolean overflow = FALSE;
8950 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8951 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8952 bfd_signed_vma reloc_signed_max = 0xffffe;
8953 bfd_signed_vma reloc_signed_min = -0x100000;
8954 bfd_signed_vma signed_check;
8955
8956 /* Need to refetch the addend and reconstruct it from the S, J1 and J2
8957 bits together with the 6-bit and 11-bit immediate fields. */
8958 if (globals->use_rel)
8959 {
8960 bfd_vma S = (upper_insn & 0x0400) >> 10;
8961 bfd_vma upper = (upper_insn & 0x003f);
8962 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8963 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8964 bfd_vma lower = (lower_insn & 0x07ff);
8965
8966 upper |= J1 << 6;
8967 upper |= J2 << 7;
8968 upper |= (!S) << 8;
8969 upper -= 0x0100; /* Sign extend. */
8970
8971 addend = (upper << 12) | (lower << 1);
8972 signed_addend = addend;
8973 }
8974
8975 /* Handle calls via the PLT. */
8976 if (plt_offset != (bfd_vma) -1)
8977 {
8978 value = (splt->output_section->vma
8979 + splt->output_offset
8980 + plt_offset);
8981 /* Target the Thumb stub before the ARM PLT entry. */
8982 value -= PLT_THUMB_STUB_SIZE;
8983 *unresolved_reloc_p = FALSE;
8984 }
8985
8986 /* ??? Should handle interworking? GCC might someday try to
8987 use this for tail calls. */
8988
8989 relocation = value + signed_addend;
8990 relocation -= (input_section->output_section->vma
8991 + input_section->output_offset
8992 + rel->r_offset);
8993 signed_check = (bfd_signed_vma) relocation;
8994
8995 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8996 overflow = TRUE;
8997
8998 /* Put RELOCATION back into the insn. */
8999 {
9000 bfd_vma S = (relocation & 0x00100000) >> 20;
9001 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9002 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9003 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9004 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9005
9006 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9007 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9008 }
9009
9010 /* Put the relocated value back in the object file: */
9011 bfd_put_16 (input_bfd, upper_insn, hit_data);
9012 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9013
9014 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9015 }
9016
9017 case R_ARM_THM_JUMP11:
9018 case R_ARM_THM_JUMP8:
9019 case R_ARM_THM_JUMP6:
9020 /* Thumb B (branch) instruction. */
9021 {
9022 bfd_signed_vma relocation;
9023 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9024 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9025 bfd_signed_vma signed_check;
9026
9027 /* CBZ cannot jump backward. */
9028 if (r_type == R_ARM_THM_JUMP6)
9029 reloc_signed_min = 0;
9030
9031 if (globals->use_rel)
9032 {
9033 /* Need to refetch addend. */
9034 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9035 if (addend & ((howto->src_mask + 1) >> 1))
9036 {
9037 signed_addend = -1;
9038 signed_addend &= ~ howto->src_mask;
9039 signed_addend |= addend;
9040 }
9041 else
9042 signed_addend = addend;
9043 /* The value in the insn has been right shifted. We need to
9044 undo this, so that we can perform the address calculation
9045 in terms of bytes. */
9046 signed_addend <<= howto->rightshift;
9047 }
9048 relocation = value + signed_addend;
9049
9050 relocation -= (input_section->output_section->vma
9051 + input_section->output_offset
9052 + rel->r_offset);
9053
9054 relocation >>= howto->rightshift;
9055 signed_check = relocation;
9056
9057 if (r_type == R_ARM_THM_JUMP6)
9058 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9059 else
9060 relocation &= howto->dst_mask;
9061 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9062
9063 bfd_put_16 (input_bfd, relocation, hit_data);
9064
9065 /* Assumes two's complement. */
9066 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9067 return bfd_reloc_overflow;
9068
9069 return bfd_reloc_ok;
9070 }
9071
9072 case R_ARM_ALU_PCREL7_0:
9073 case R_ARM_ALU_PCREL15_8:
9074 case R_ARM_ALU_PCREL23_15:
9075 {
9076 bfd_vma insn;
9077 bfd_vma relocation;
9078
9079 insn = bfd_get_32 (input_bfd, hit_data);
9080 if (globals->use_rel)
9081 {
9082 /* Extract the addend. */
9083 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9084 signed_addend = addend;
9085 }
9086 relocation = value + signed_addend;
9087
9088 relocation -= (input_section->output_section->vma
9089 + input_section->output_offset
9090 + rel->r_offset);
9091 insn = (insn & ~0xfff)
9092 | ((howto->bitpos << 7) & 0xf00)
9093 | ((relocation >> howto->bitpos) & 0xff);
9094 bfd_put_32 (input_bfd, insn, hit_data);
9095 }
9096 return bfd_reloc_ok;
9097
9098 case R_ARM_GNU_VTINHERIT:
9099 case R_ARM_GNU_VTENTRY:
9100 return bfd_reloc_ok;
9101
9102 case R_ARM_GOTOFF32:
9103 /* Relocation is relative to the start of the
9104 global offset table. */
9105
9106 BFD_ASSERT (sgot != NULL);
9107 if (sgot == NULL)
9108 return bfd_reloc_notsupported;
9109
9110 /* If we are addressing a Thumb function, we need to adjust the
9111 address by one, so that attempts to call the function pointer will
9112 correctly interpret it as Thumb code. */
9113 if (branch_type == ST_BRANCH_TO_THUMB)
9114 value += 1;
9115
9116 /* Note that sgot->output_offset is not involved in this
9117 calculation. We always want the start of .got. If we
9118 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
9119 permitted by the ABI, we might have to change this
9120 calculation. */
9121 value -= sgot->output_section->vma;
9122 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9123 contents, rel->r_offset, value,
9124 rel->r_addend);
9125
9126 case R_ARM_GOTPC:
9127 /* Use global offset table as symbol value. */
9128 BFD_ASSERT (sgot != NULL);
9129
9130 if (sgot == NULL)
9131 return bfd_reloc_notsupported;
9132
9133 *unresolved_reloc_p = FALSE;
9134 value = sgot->output_section->vma;
9135 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9136 contents, rel->r_offset, value,
9137 rel->r_addend);
9138
9139 case R_ARM_GOT32:
9140 case R_ARM_GOT_PREL:
9141 /* Relocation is to the entry for this symbol in the
9142 global offset table. */
9143 if (sgot == NULL)
9144 return bfd_reloc_notsupported;
9145
9146 if (dynreloc_st_type == STT_GNU_IFUNC
9147 && plt_offset != (bfd_vma) -1
9148 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9149 {
9150 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9151 symbol, and the relocation resolves directly to the runtime
9152 target rather than to the .iplt entry. This means that any
9153 .got entry would be the same value as the .igot.plt entry,
9154 so there's no point creating both. */
9155 sgot = globals->root.igotplt;
9156 value = sgot->output_offset + gotplt_offset;
9157 }
9158 else if (h != NULL)
9159 {
9160 bfd_vma off;
9161
9162 off = h->got.offset;
9163 BFD_ASSERT (off != (bfd_vma) -1);
9164 if ((off & 1) != 0)
9165 {
9166 /* We have already processed one GOT relocation against
9167 this symbol. */
9168 off &= ~1;
9169 if (globals->root.dynamic_sections_created
9170 && !SYMBOL_REFERENCES_LOCAL (info, h))
9171 *unresolved_reloc_p = FALSE;
9172 }
9173 else
9174 {
9175 Elf_Internal_Rela outrel;
9176
9177 if (!SYMBOL_REFERENCES_LOCAL (info, h))
9178 {
9179 /* If the symbol doesn't resolve locally in a static
9180 object, we have an undefined reference. If the
9181 symbol doesn't resolve locally in a dynamic object,
9182 it should be resolved by the dynamic linker. */
9183 if (globals->root.dynamic_sections_created)
9184 {
9185 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9186 *unresolved_reloc_p = FALSE;
9187 }
9188 else
9189 outrel.r_info = 0;
9190 outrel.r_addend = 0;
9191 }
9192 else
9193 {
9194 if (dynreloc_st_type == STT_GNU_IFUNC)
9195 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9196 else if (info->shared)
9197 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9198 else
9199 outrel.r_info = 0;
9200 outrel.r_addend = dynreloc_value;
9201 }
9202
9203 /* The GOT entry is initialized to zero by default.
9204 See if we should install a different value. */
9205 if (outrel.r_addend != 0
9206 && (outrel.r_info == 0 || globals->use_rel))
9207 {
9208 bfd_put_32 (output_bfd, outrel.r_addend,
9209 sgot->contents + off);
9210 outrel.r_addend = 0;
9211 }
9212
9213 if (outrel.r_info != 0)
9214 {
9215 outrel.r_offset = (sgot->output_section->vma
9216 + sgot->output_offset
9217 + off);
9218 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9219 }
9220 h->got.offset |= 1;
9221 }
9222 value = sgot->output_offset + off;
9223 }
9224 else
9225 {
9226 bfd_vma off;
9227
9228 BFD_ASSERT (local_got_offsets != NULL &&
9229 local_got_offsets[r_symndx] != (bfd_vma) -1);
9230
9231 off = local_got_offsets[r_symndx];
9232
9233 /* The offset must always be a multiple of 4. We use the
9234 least significant bit to record whether we have already
9235 generated the necessary reloc. */
9236 if ((off & 1) != 0)
9237 off &= ~1;
9238 else
9239 {
9240 if (globals->use_rel)
9241 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9242
9243 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9244 {
9245 Elf_Internal_Rela outrel;
9246
9247 outrel.r_addend = addend + dynreloc_value;
9248 outrel.r_offset = (sgot->output_section->vma
9249 + sgot->output_offset
9250 + off);
9251 if (dynreloc_st_type == STT_GNU_IFUNC)
9252 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9253 else
9254 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9255 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9256 }
9257
9258 local_got_offsets[r_symndx] |= 1;
9259 }
9260
9261 value = sgot->output_offset + off;
9262 }
9263 if (r_type != R_ARM_GOT32)
9264 value += sgot->output_section->vma;
9265
9266 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9267 contents, rel->r_offset, value,
9268 rel->r_addend);
9269
9270 case R_ARM_TLS_LDO32:
9271 value = value - dtpoff_base (info);
9272
9273 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9274 contents, rel->r_offset, value,
9275 rel->r_addend);
9276
9277 case R_ARM_TLS_LDM32:
9278 {
9279 bfd_vma off;
9280
9281 if (sgot == NULL)
9282 abort ();
9283
9284 off = globals->tls_ldm_got.offset;
9285
9286 if ((off & 1) != 0)
9287 off &= ~1;
9288 else
9289 {
9290 /* If we don't know the module number, create a relocation
9291 for it. */
9292 if (info->shared)
9293 {
9294 Elf_Internal_Rela outrel;
9295
9296 if (srelgot == NULL)
9297 abort ();
9298
9299 outrel.r_addend = 0;
9300 outrel.r_offset = (sgot->output_section->vma
9301 + sgot->output_offset + off);
9302 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9303
9304 if (globals->use_rel)
9305 bfd_put_32 (output_bfd, outrel.r_addend,
9306 sgot->contents + off);
9307
9308 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9309 }
9310 else
9311 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9312
9313 globals->tls_ldm_got.offset |= 1;
9314 }
9315
9316 value = sgot->output_section->vma + sgot->output_offset + off
9317 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9318
9319 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9320 contents, rel->r_offset, value,
9321 rel->r_addend);
9322 }
9323
9324 case R_ARM_TLS_CALL:
9325 case R_ARM_THM_TLS_CALL:
9326 case R_ARM_TLS_GD32:
9327 case R_ARM_TLS_IE32:
9328 case R_ARM_TLS_GOTDESC:
9329 case R_ARM_TLS_DESCSEQ:
9330 case R_ARM_THM_TLS_DESCSEQ:
9331 {
9332 bfd_vma off, offplt;
9333 int indx = 0;
9334 char tls_type;
9335
9336 BFD_ASSERT (sgot != NULL);
9337
9338 if (h != NULL)
9339 {
9340 bfd_boolean dyn;
9341 dyn = globals->root.dynamic_sections_created;
9342 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9343 && (!info->shared
9344 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9345 {
9346 *unresolved_reloc_p = FALSE;
9347 indx = h->dynindx;
9348 }
9349 off = h->got.offset;
9350 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9351 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9352 }
9353 else
9354 {
9355 BFD_ASSERT (local_got_offsets != NULL);
9356 off = local_got_offsets[r_symndx];
9357 offplt = local_tlsdesc_gotents[r_symndx];
9358 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9359 }
9360
9361 /* Linker relaxation happens from one of the
9362 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9363 if (ELF32_R_TYPE (rel->r_info) != r_type)
9364 tls_type = GOT_TLS_IE;
9365
9366 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9367
9368 if ((off & 1) != 0)
9369 off &= ~1;
9370 else
9371 {
9372 bfd_boolean need_relocs = FALSE;
9373 Elf_Internal_Rela outrel;
9374 int cur_off = off;
9375
9376 /* The GOT entries have not been initialized yet. Do it
9377 now, and emit any relocations. If both an IE GOT and a
9378 GD GOT are necessary, we emit the GD first. */
9379
9380 if ((info->shared || indx != 0)
9381 && (h == NULL
9382 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9383 || h->root.type != bfd_link_hash_undefweak))
9384 {
9385 need_relocs = TRUE;
9386 BFD_ASSERT (srelgot != NULL);
9387 }
9388
9389 if (tls_type & GOT_TLS_GDESC)
9390 {
9391 bfd_byte *loc;
9392
9393 /* We should have relaxed, unless this is an undefined
9394 weak symbol. */
9395 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9396 || info->shared);
9397 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9398 <= globals->root.sgotplt->size);
9399
9400 outrel.r_addend = 0;
9401 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9402 + globals->root.sgotplt->output_offset
9403 + offplt
9404 + globals->sgotplt_jump_table_size);
9405
9406 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9407 sreloc = globals->root.srelplt;
9408 loc = sreloc->contents;
9409 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9410 BFD_ASSERT (loc + RELOC_SIZE (globals)
9411 <= sreloc->contents + sreloc->size);
9412
9413 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9414
9415 /* For globals, the first word in the relocation gets
9416 the relocation index and the top bit set, or zero,
9417 if we're binding now. For locals, it gets the
9418 symbol's offset in the tls section. */
9419 bfd_put_32 (output_bfd,
9420 !h ? value - elf_hash_table (info)->tls_sec->vma
9421 : info->flags & DF_BIND_NOW ? 0
9422 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9423 globals->root.sgotplt->contents + offplt
9424 + globals->sgotplt_jump_table_size);
9425
9426 /* Second word in the relocation is always zero. */
9427 bfd_put_32 (output_bfd, 0,
9428 globals->root.sgotplt->contents + offplt
9429 + globals->sgotplt_jump_table_size + 4);
9430 }
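      /* A GD GOT entry occupies two consecutive words: the module index
         (DTPMOD) followed by the symbol's offset within that module's
         TLS block (DTPOFF).  */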
9431 if (tls_type & GOT_TLS_GD)
9432 {
9433 if (need_relocs)
9434 {
9435 outrel.r_addend = 0;
9436 outrel.r_offset = (sgot->output_section->vma
9437 + sgot->output_offset
9438 + cur_off);
9439 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9440
9441 if (globals->use_rel)
9442 bfd_put_32 (output_bfd, outrel.r_addend,
9443 sgot->contents + cur_off);
9444
9445 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9446
9447 if (indx == 0)
9448 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9449 sgot->contents + cur_off + 4);
9450 else
9451 {
9452 outrel.r_addend = 0;
9453 outrel.r_info = ELF32_R_INFO (indx,
9454 R_ARM_TLS_DTPOFF32);
9455 outrel.r_offset += 4;
9456
9457 if (globals->use_rel)
9458 bfd_put_32 (output_bfd, outrel.r_addend,
9459 sgot->contents + cur_off + 4);
9460
9461 elf32_arm_add_dynreloc (output_bfd, info,
9462 srelgot, &outrel);
9463 }
9464 }
9465 else
9466 {
9467 /* If we are not emitting relocations for a
9468 general dynamic reference, then we must be in a
9469 static link or an executable link with the
9470 symbol binding locally. Mark it as belonging
9471 to module 1, the executable. */
9472 bfd_put_32 (output_bfd, 1,
9473 sgot->contents + cur_off);
9474 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9475 sgot->contents + cur_off + 4);
9476 }
9477
9478 cur_off += 8;
9479 }
9480
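      /* An IE GOT entry is a single word holding the thread-pointer
         relative offset, either written directly or filled in at run
         time via an R_ARM_TLS_TPOFF32 dynamic relocation.  */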
9481 if (tls_type & GOT_TLS_IE)
9482 {
9483 if (need_relocs)
9484 {
9485 if (indx == 0)
9486 outrel.r_addend = value - dtpoff_base (info);
9487 else
9488 outrel.r_addend = 0;
9489 outrel.r_offset = (sgot->output_section->vma
9490 + sgot->output_offset
9491 + cur_off);
9492 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9493
9494 if (globals->use_rel)
9495 bfd_put_32 (output_bfd, outrel.r_addend,
9496 sgot->contents + cur_off);
9497
9498 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9499 }
9500 else
9501 bfd_put_32 (output_bfd, tpoff (info, value),
9502 sgot->contents + cur_off);
9503 cur_off += 4;
9504 }
9505
9506 if (h != NULL)
9507 h->got.offset |= 1;
9508 else
9509 local_got_offsets[r_symndx] |= 1;
9510 }
9511
9512 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9513 off += 8;
9514 else if (tls_type & GOT_TLS_GDESC)
9515 off = offplt;
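     /* OFF now selects the GOT word this particular relocation should
        refer to: the IE word that follows a two-word GD entry, or the
        descriptor slot in .got.plt for GDESC-style references.  */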
9516
9517 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
9518 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
9519 {
9520 bfd_signed_vma offset;
9521 /* TLS stubs are ARM mode. The original symbol is a
9522 data object, so branch_type is bogus. */
9523 branch_type = ST_BRANCH_TO_ARM;
9524 enum elf32_arm_stub_type stub_type
9525 = arm_type_of_stub (info, input_section, rel,
9526 st_type, &branch_type,
9527 (struct elf32_arm_link_hash_entry *)h,
9528 globals->tls_trampoline, globals->root.splt,
9529 input_bfd, sym_name);
9530
9531 if (stub_type != arm_stub_none)
9532 {
9533 struct elf32_arm_stub_hash_entry *stub_entry
9534 = elf32_arm_get_stub_entry
9535 (input_section, globals->root.splt, 0, rel,
9536 globals, stub_type);
9537 offset = (stub_entry->stub_offset
9538 + stub_entry->stub_sec->output_offset
9539 + stub_entry->stub_sec->output_section->vma);
9540 }
9541 else
9542 offset = (globals->root.splt->output_section->vma
9543 + globals->root.splt->output_offset
9544 + globals->tls_trampoline);
9545
9546 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
9547 {
9548 unsigned long inst;
9549
9550 offset -= (input_section->output_section->vma
9551 + input_section->output_offset
9552 + rel->r_offset + 8);
9553
9554 inst = offset >> 2;
9555 inst &= 0x00ffffff;
9556 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9557 }
9558 else
9559 {
9560 /* Thumb blx encodes the offset in a complicated
9561 fashion. */
9562 unsigned upper_insn, lower_insn;
9563 unsigned neg;
9564
9565 offset -= (input_section->output_section->vma
9566 + input_section->output_offset
9567 + rel->r_offset + 4);
9568
9569 if (stub_type != arm_stub_none
9570 && arm_stub_is_thumb (stub_type))
9571 {
9572 lower_insn = 0xd000;
9573 }
9574 else
9575 {
9576 lower_insn = 0xc000;
9577 /* Round up the offset to a word boundary. */
9578 offset = (offset + 2) & ~2;
9579 }
9580
9581 neg = offset < 0;
9582 upper_insn = (0xf000
9583 | ((offset >> 12) & 0x3ff)
9584 | (neg << 10));
9585 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9586 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9587 | ((offset >> 1) & 0x7ff);
9588 bfd_put_16 (input_bfd, upper_insn, hit_data);
9589 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9590 return bfd_reloc_ok;
9591 }
9592 }
9593 /* These relocations need special care: besides the fact
9594 that they point somewhere into .gotplt, the addend must be
9595 adjusted according to the type of instruction that
9596 refers to it. */
9597 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9598 {
9599 unsigned long data, insn;
9600 unsigned thumb;
9601
9602 data = bfd_get_32 (input_bfd, hit_data);
9603 thumb = data & 1;
9604 data &= ~1u;
9605
9606 if (thumb)
9607 {
9608 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9609 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9610 insn = (insn << 16)
9611 | bfd_get_16 (input_bfd,
9612 contents + rel->r_offset - data + 2);
9613 if ((insn & 0xf800c000) == 0xf000c000)
9614 /* bl/blx */
9615 value = -6;
9616 else if ((insn & 0xffffff00) == 0x4400)
9617 /* add */
9618 value = -5;
9619 else
9620 {
9621 (*_bfd_error_handler)
9622 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9623 input_bfd, input_section,
9624 (unsigned long)rel->r_offset, insn);
9625 return bfd_reloc_notsupported;
9626 }
9627 }
9628 else
9629 {
9630 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9631
9632 switch (insn >> 24)
9633 {
9634 case 0xeb: /* bl */
9635 case 0xfa: /* blx */
9636 value = -4;
9637 break;
9638
9639 case 0xe0: /* add */
9640 value = -8;
9641 break;
9642
9643 default:
9644 (*_bfd_error_handler)
9645 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9646 input_bfd, input_section,
9647 (unsigned long)rel->r_offset, insn);
9648 return bfd_reloc_notsupported;
9649 }
9650 }
9651
9652 value += ((globals->root.sgotplt->output_section->vma
9653 + globals->root.sgotplt->output_offset + off)
9654 - (input_section->output_section->vma
9655 + input_section->output_offset
9656 + rel->r_offset)
9657 + globals->sgotplt_jump_table_size);
9658 }
9659 else
9660 value = ((globals->root.sgot->output_section->vma
9661 + globals->root.sgot->output_offset + off)
9662 - (input_section->output_section->vma
9663 + input_section->output_offset + rel->r_offset));
9664
9665 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9666 contents, rel->r_offset, value,
9667 rel->r_addend);
9668 }
9669
9670 case R_ARM_TLS_LE32:
9671 if (info->shared && !info->pie)
9672 {
9673 (*_bfd_error_handler)
9674 (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
9675 input_bfd, input_section,
9676 (long) rel->r_offset, howto->name);
9677 return bfd_reloc_notsupported;
9678 }
9679 else
9680 value = tpoff (info, value);
9681
9682 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9683 contents, rel->r_offset, value,
9684 rel->r_addend);
9685
9686 case R_ARM_V4BX:
9687 if (globals->fix_v4bx)
9688 {
9689 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9690
9691 /* Ensure that we have a BX instruction. */
9692 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9693
9694 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9695 {
9696 /* Branch to veneer. */
9697 bfd_vma glue_addr;
9698 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9699 glue_addr -= input_section->output_section->vma
9700 + input_section->output_offset
9701 + rel->r_offset + 8;
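     /* Rewrite the BX as a branch (B<cond>) to the veneer, preserving the
        original condition code; the extra 8 subtracted above accounts for
        the ARM-state PC pipeline offset.  */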
9702 insn = (insn & 0xf0000000) | 0x0a000000
9703 | ((glue_addr >> 2) & 0x00ffffff);
9704 }
9705 else
9706 {
9707 /* Preserve Rm (lowest four bits) and the condition code
9708 (highest four bits). Other bits encode MOV PC,Rm. */
9709 insn = (insn & 0xf000000f) | 0x01a0f000;
9710 }
9711
9712 bfd_put_32 (input_bfd, insn, hit_data);
9713 }
9714 return bfd_reloc_ok;
9715
9716 case R_ARM_MOVW_ABS_NC:
9717 case R_ARM_MOVT_ABS:
9718 case R_ARM_MOVW_PREL_NC:
9719 case R_ARM_MOVT_PREL:
9720 /* Until we properly support segment-base-relative addressing, we
9721 assume the segment base to be zero, as for the group relocations.
9722 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9723 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9724 case R_ARM_MOVW_BREL_NC:
9725 case R_ARM_MOVW_BREL:
9726 case R_ARM_MOVT_BREL:
9727 {
9728 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9729
9730 if (globals->use_rel)
9731 {
9732 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
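     /* (x ^ 0x8000) - 0x8000 sign-extends the reassembled 16-bit
        immediate to the full width of the addend.  */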
9733 signed_addend = (addend ^ 0x8000) - 0x8000;
9734 }
9735
9736 value += signed_addend;
9737
9738 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9739 value -= (input_section->output_section->vma
9740 + input_section->output_offset + rel->r_offset);
9741
9742 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9743 return bfd_reloc_overflow;
9744
9745 if (branch_type == ST_BRANCH_TO_THUMB)
9746 value |= 1;
9747
9748 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9749 || r_type == R_ARM_MOVT_BREL)
9750 value >>= 16;
9751
9752 insn &= 0xfff0f000;
9753 insn |= value & 0xfff;
9754 insn |= (value & 0xf000) << 4;
9755 bfd_put_32 (input_bfd, insn, hit_data);
9756 }
9757 return bfd_reloc_ok;
9758
9759 case R_ARM_THM_MOVW_ABS_NC:
9760 case R_ARM_THM_MOVT_ABS:
9761 case R_ARM_THM_MOVW_PREL_NC:
9762 case R_ARM_THM_MOVT_PREL:
9763 /* Until we properly support segment-base-relative addressing, we
9764 assume the segment base to be zero, as for the above relocations.
9765 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9766 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9767 as R_ARM_THM_MOVT_ABS. */
9768 case R_ARM_THM_MOVW_BREL_NC:
9769 case R_ARM_THM_MOVW_BREL:
9770 case R_ARM_THM_MOVT_BREL:
9771 {
9772 bfd_vma insn;
9773
9774 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9775 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9776
9777 if (globals->use_rel)
9778 {
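     /* The Thumb-2 MOVW/MOVT immediate is scattered across the two
        halfwords as imm4:i:imm3:imm8; gather it back into a contiguous
        16-bit value before sign-extending.  */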
9779 addend = ((insn >> 4) & 0xf000)
9780 | ((insn >> 15) & 0x0800)
9781 | ((insn >> 4) & 0x0700)
9782 | (insn & 0x00ff);
9783 signed_addend = (addend ^ 0x8000) - 0x8000;
9784 }
9785
9786 value += signed_addend;
9787
9788 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9789 value -= (input_section->output_section->vma
9790 + input_section->output_offset + rel->r_offset);
9791
9792 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9793 return bfd_reloc_overflow;
9794
9795 if (branch_type == ST_BRANCH_TO_THUMB)
9796 value |= 1;
9797
9798 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9799 || r_type == R_ARM_THM_MOVT_BREL)
9800 value >>= 16;
9801
9802 insn &= 0xfbf08f00;
9803 insn |= (value & 0xf000) << 4;
9804 insn |= (value & 0x0800) << 15;
9805 insn |= (value & 0x0700) << 4;
9806 insn |= (value & 0x00ff);
9807
9808 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9809 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9810 }
9811 return bfd_reloc_ok;
9812
9813 case R_ARM_ALU_PC_G0_NC:
9814 case R_ARM_ALU_PC_G1_NC:
9815 case R_ARM_ALU_PC_G0:
9816 case R_ARM_ALU_PC_G1:
9817 case R_ARM_ALU_PC_G2:
9818 case R_ARM_ALU_SB_G0_NC:
9819 case R_ARM_ALU_SB_G1_NC:
9820 case R_ARM_ALU_SB_G0:
9821 case R_ARM_ALU_SB_G1:
9822 case R_ARM_ALU_SB_G2:
9823 {
9824 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9825 bfd_vma pc = input_section->output_section->vma
9826 + input_section->output_offset + rel->r_offset;
9827 /* sb should be the origin of the *segment* containing the symbol.
9828 It is not clear how to obtain this OS-dependent value, so we
9829 make an arbitrary choice of zero. */
9830 bfd_vma sb = 0;
9831 bfd_vma residual;
9832 bfd_vma g_n;
9833 bfd_signed_vma signed_value;
9834 int group = 0;
9835
9836 /* Determine which group of bits to select. */
9837 switch (r_type)
9838 {
9839 case R_ARM_ALU_PC_G0_NC:
9840 case R_ARM_ALU_PC_G0:
9841 case R_ARM_ALU_SB_G0_NC:
9842 case R_ARM_ALU_SB_G0:
9843 group = 0;
9844 break;
9845
9846 case R_ARM_ALU_PC_G1_NC:
9847 case R_ARM_ALU_PC_G1:
9848 case R_ARM_ALU_SB_G1_NC:
9849 case R_ARM_ALU_SB_G1:
9850 group = 1;
9851 break;
9852
9853 case R_ARM_ALU_PC_G2:
9854 case R_ARM_ALU_SB_G2:
9855 group = 2;
9856 break;
9857
9858 default:
9859 abort ();
9860 }
9861
9862 /* If REL, extract the addend from the insn. If RELA, it will
9863 have already been fetched for us. */
9864 if (globals->use_rel)
9865 {
9866 int negative;
9867 bfd_vma constant = insn & 0xff;
9868 bfd_vma rotation = (insn & 0xf00) >> 8;
9869
9870 if (rotation == 0)
9871 signed_addend = constant;
9872 else
9873 {
9874 /* Compensate for the fact that in the instruction, the
9875 rotation is stored in multiples of 2 bits. */
9876 rotation *= 2;
9877
9878 /* Rotate "constant" right by "rotation" bits. */
9879 signed_addend = (constant >> rotation) |
9880 (constant << (8 * sizeof (bfd_vma) - rotation));
9881 }
9882
9883 /* Determine if the instruction is an ADD or a SUB.
9884 (For REL, this determines the sign of the addend.) */
9885 negative = identify_add_or_sub (insn);
9886 if (negative == 0)
9887 {
9888 (*_bfd_error_handler)
9889 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9890 input_bfd, input_section,
9891 (long) rel->r_offset, howto->name);
9892 return bfd_reloc_overflow;
9893 }
9894
9895 signed_addend *= negative;
9896 }
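     /* Group relocations split the offset into up to three chunks (G0,
        G1, G2), each of which must be representable as an ARM rotated
        8-bit immediate; calculate_group_reloc_mask returns the chunk for
        the requested group and the residual that remains.  */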
9897
9898 /* Compute the value (X) to go in the place. */
9899 if (r_type == R_ARM_ALU_PC_G0_NC
9900 || r_type == R_ARM_ALU_PC_G1_NC
9901 || r_type == R_ARM_ALU_PC_G0
9902 || r_type == R_ARM_ALU_PC_G1
9903 || r_type == R_ARM_ALU_PC_G2)
9904 /* PC relative. */
9905 signed_value = value - pc + signed_addend;
9906 else
9907 /* Section base relative. */
9908 signed_value = value - sb + signed_addend;
9909
9910 /* If the target symbol is a Thumb function, then set the
9911 Thumb bit in the address. */
9912 if (branch_type == ST_BRANCH_TO_THUMB)
9913 signed_value |= 1;
9914
9915 /* Calculate the value of the relevant G_n, in encoded
9916 constant-with-rotation format. */
9917 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9918 &residual);
9919
9920 /* Check for overflow if required. */
9921 if ((r_type == R_ARM_ALU_PC_G0
9922 || r_type == R_ARM_ALU_PC_G1
9923 || r_type == R_ARM_ALU_PC_G2
9924 || r_type == R_ARM_ALU_SB_G0
9925 || r_type == R_ARM_ALU_SB_G1
9926 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9927 {
9928 (*_bfd_error_handler)
9929 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9930 input_bfd, input_section,
9931 (long) rel->r_offset, abs (signed_value), howto->name);
9932 return bfd_reloc_overflow;
9933 }
9934
9935 /* Mask out the value and the ADD/SUB part of the opcode; take care
9936 not to destroy the S bit. */
9937 insn &= 0xff1ff000;
9938
9939 /* Set the opcode according to whether the value to go in the
9940 place is negative. */
9941 if (signed_value < 0)
9942 insn |= 1 << 22;
9943 else
9944 insn |= 1 << 23;
9945
9946 /* Encode the offset. */
9947 insn |= g_n;
9948
9949 bfd_put_32 (input_bfd, insn, hit_data);
9950 }
9951 return bfd_reloc_ok;
9952
9953 case R_ARM_LDR_PC_G0:
9954 case R_ARM_LDR_PC_G1:
9955 case R_ARM_LDR_PC_G2:
9956 case R_ARM_LDR_SB_G0:
9957 case R_ARM_LDR_SB_G1:
9958 case R_ARM_LDR_SB_G2:
9959 {
9960 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9961 bfd_vma pc = input_section->output_section->vma
9962 + input_section->output_offset + rel->r_offset;
9963 bfd_vma sb = 0; /* See note above. */
9964 bfd_vma residual;
9965 bfd_signed_vma signed_value;
9966 int group = 0;
9967
9968 /* Determine which groups of bits to calculate. */
9969 switch (r_type)
9970 {
9971 case R_ARM_LDR_PC_G0:
9972 case R_ARM_LDR_SB_G0:
9973 group = 0;
9974 break;
9975
9976 case R_ARM_LDR_PC_G1:
9977 case R_ARM_LDR_SB_G1:
9978 group = 1;
9979 break;
9980
9981 case R_ARM_LDR_PC_G2:
9982 case R_ARM_LDR_SB_G2:
9983 group = 2;
9984 break;
9985
9986 default:
9987 abort ();
9988 }
9989
9990 /* If REL, extract the addend from the insn. If RELA, it will
9991 have already been fetched for us. */
9992 if (globals->use_rel)
9993 {
9994 int negative = (insn & (1 << 23)) ? 1 : -1;
9995 signed_addend = negative * (insn & 0xfff);
9996 }
9997
9998 /* Compute the value (X) to go in the place. */
9999 if (r_type == R_ARM_LDR_PC_G0
10000 || r_type == R_ARM_LDR_PC_G1
10001 || r_type == R_ARM_LDR_PC_G2)
10002 /* PC relative. */
10003 signed_value = value - pc + signed_addend;
10004 else
10005 /* Section base relative. */
10006 signed_value = value - sb + signed_addend;
10007
10008 /* Calculate the value of the relevant G_{n-1} to obtain
10009 the residual at that stage. */
10010 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
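     /* For LDR-class group relocations the final chunk is not encoded as
        a rotated immediate; the residual left after removing groups
        0..n-1 must fit directly in the instruction's 12-bit offset
        field, which is checked below.  */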
10011
10012 /* Check for overflow. */
10013 if (residual >= 0x1000)
10014 {
10015 (*_bfd_error_handler)
10016 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10017 input_bfd, input_section,
10018 (long) rel->r_offset, abs (signed_value), howto->name);
10019 return bfd_reloc_overflow;
10020 }
10021
10022 /* Mask out the value and U bit. */
10023 insn &= 0xff7ff000;
10024
10025 /* Set the U bit if the value to go in the place is non-negative. */
10026 if (signed_value >= 0)
10027 insn |= 1 << 23;
10028
10029 /* Encode the offset. */
10030 insn |= residual;
10031
10032 bfd_put_32 (input_bfd, insn, hit_data);
10033 }
10034 return bfd_reloc_ok;
10035
10036 case R_ARM_LDRS_PC_G0:
10037 case R_ARM_LDRS_PC_G1:
10038 case R_ARM_LDRS_PC_G2:
10039 case R_ARM_LDRS_SB_G0:
10040 case R_ARM_LDRS_SB_G1:
10041 case R_ARM_LDRS_SB_G2:
10042 {
10043 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10044 bfd_vma pc = input_section->output_section->vma
10045 + input_section->output_offset + rel->r_offset;
10046 bfd_vma sb = 0; /* See note above. */
10047 bfd_vma residual;
10048 bfd_signed_vma signed_value;
10049 int group = 0;
10050
10051 /* Determine which groups of bits to calculate. */
10052 switch (r_type)
10053 {
10054 case R_ARM_LDRS_PC_G0:
10055 case R_ARM_LDRS_SB_G0:
10056 group = 0;
10057 break;
10058
10059 case R_ARM_LDRS_PC_G1:
10060 case R_ARM_LDRS_SB_G1:
10061 group = 1;
10062 break;
10063
10064 case R_ARM_LDRS_PC_G2:
10065 case R_ARM_LDRS_SB_G2:
10066 group = 2;
10067 break;
10068
10069 default:
10070 abort ();
10071 }
10072
10073 /* If REL, extract the addend from the insn. If RELA, it will
10074 have already been fetched for us. */
10075 if (globals->use_rel)
10076 {
10077 int negative = (insn & (1 << 23)) ? 1 : -1;
10078 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10079 }
10080
10081 /* Compute the value (X) to go in the place. */
10082 if (r_type == R_ARM_LDRS_PC_G0
10083 || r_type == R_ARM_LDRS_PC_G1
10084 || r_type == R_ARM_LDRS_PC_G2)
10085 /* PC relative. */
10086 signed_value = value - pc + signed_addend;
10087 else
10088 /* Section base relative. */
10089 signed_value = value - sb + signed_addend;
10090
10091 /* Calculate the value of the relevant G_{n-1} to obtain
10092 the residual at that stage. */
10093 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10094
10095 /* Check for overflow. */
10096 if (residual >= 0x100)
10097 {
10098 (*_bfd_error_handler)
10099 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10100 input_bfd, input_section,
10101 (long) rel->r_offset, abs (signed_value), howto->name);
10102 return bfd_reloc_overflow;
10103 }
10104
10105 /* Mask out the value and U bit. */
10106 insn &= 0xff7ff0f0;
10107
10108 /* Set the U bit if the value to go in the place is non-negative. */
10109 if (signed_value >= 0)
10110 insn |= 1 << 23;
10111
10112 /* Encode the offset. */
10113 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10114
10115 bfd_put_32 (input_bfd, insn, hit_data);
10116 }
10117 return bfd_reloc_ok;
10118
10119 case R_ARM_LDC_PC_G0:
10120 case R_ARM_LDC_PC_G1:
10121 case R_ARM_LDC_PC_G2:
10122 case R_ARM_LDC_SB_G0:
10123 case R_ARM_LDC_SB_G1:
10124 case R_ARM_LDC_SB_G2:
10125 {
10126 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10127 bfd_vma pc = input_section->output_section->vma
10128 + input_section->output_offset + rel->r_offset;
10129 bfd_vma sb = 0; /* See note above. */
10130 bfd_vma residual;
10131 bfd_signed_vma signed_value;
10132 int group = 0;
10133
10134 /* Determine which groups of bits to calculate. */
10135 switch (r_type)
10136 {
10137 case R_ARM_LDC_PC_G0:
10138 case R_ARM_LDC_SB_G0:
10139 group = 0;
10140 break;
10141
10142 case R_ARM_LDC_PC_G1:
10143 case R_ARM_LDC_SB_G1:
10144 group = 1;
10145 break;
10146
10147 case R_ARM_LDC_PC_G2:
10148 case R_ARM_LDC_SB_G2:
10149 group = 2;
10150 break;
10151
10152 default:
10153 abort ();
10154 }
10155
10156 /* If REL, extract the addend from the insn. If RELA, it will
10157 have already been fetched for us. */
10158 if (globals->use_rel)
10159 {
10160 int negative = (insn & (1 << 23)) ? 1 : -1;
10161 signed_addend = negative * ((insn & 0xff) << 2);
10162 }
10163
10164 /* Compute the value (X) to go in the place. */
10165 if (r_type == R_ARM_LDC_PC_G0
10166 || r_type == R_ARM_LDC_PC_G1
10167 || r_type == R_ARM_LDC_PC_G2)
10168 /* PC relative. */
10169 signed_value = value - pc + signed_addend;
10170 else
10171 /* Section base relative. */
10172 signed_value = value - sb + signed_addend;
10173
10174 /* Calculate the value of the relevant G_{n-1} to obtain
10175 the residual at that stage. */
10176 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10177
10178 /* Check for overflow. (The absolute value to go in the place must be
10179 divisible by four and, after having been divided by four, must
10180 fit in eight bits.) */
10181 if ((residual & 0x3) != 0 || residual >= 0x400)
10182 {
10183 (*_bfd_error_handler)
10184 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10185 input_bfd, input_section,
10186 (long) rel->r_offset, abs (signed_value), howto->name);
10187 return bfd_reloc_overflow;
10188 }
10189
10190 /* Mask out the value and U bit. */
10191 insn &= 0xff7fff00;
10192
10193 /* Set the U bit if the value to go in the place is non-negative. */
10194 if (signed_value >= 0)
10195 insn |= 1 << 23;
10196
10197 /* Encode the offset. */
10198 insn |= residual >> 2;
10199
10200 bfd_put_32 (input_bfd, insn, hit_data);
10201 }
10202 return bfd_reloc_ok;
10203
10204 default:
10205 return bfd_reloc_notsupported;
10206 }
10207 }
10208
10209 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10210 static void
10211 arm_add_to_rel (bfd * abfd,
10212 bfd_byte * address,
10213 reloc_howto_type * howto,
10214 bfd_signed_vma increment)
10215 {
10216 bfd_signed_vma addend;
10217
10218 if (howto->type == R_ARM_THM_CALL
10219 || howto->type == R_ARM_THM_JUMP24)
10220 {
10221 int upper_insn, lower_insn;
10222 int upper, lower;
10223
10224 upper_insn = bfd_get_16 (abfd, address);
10225 lower_insn = bfd_get_16 (abfd, address + 2);
10226 upper = upper_insn & 0x7ff;
10227 lower = lower_insn & 0x7ff;
10228
10229 addend = (upper << 12) | (lower << 1);
10230 addend += increment;
10231 addend >>= 1;
10232
10233 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10234 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10235
10236 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10237 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10238 }
10239 else
10240 {
10241 bfd_vma contents;
10242
10243 contents = bfd_get_32 (abfd, address);
10244
10245 /* Get the (signed) value from the instruction. */
10246 addend = contents & howto->src_mask;
10247 if (addend & ((howto->src_mask + 1) >> 1))
10248 {
10249 bfd_signed_vma mask;
10250
10251 mask = -1;
10252 mask &= ~ howto->src_mask;
10253 addend |= mask;
10254 }
10255
10256 /* Add in the increment (which is a byte value). */
10257 switch (howto->type)
10258 {
10259 default:
10260 addend += increment;
10261 break;
10262
10263 case R_ARM_PC24:
10264 case R_ARM_PLT32:
10265 case R_ARM_CALL:
10266 case R_ARM_JUMP24:
10267 addend <<= howto->size;
10268 addend += increment;
10269
10270 /* Should we check for overflow here? */
10271
10272 /* Drop any undesired bits. */
10273 addend >>= howto->rightshift;
10274 break;
10275 }
10276
10277 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10278
10279 bfd_put_32 (abfd, contents, address);
10280 }
10281 }
10282
10283 #define IS_ARM_TLS_RELOC(R_TYPE) \
10284 ((R_TYPE) == R_ARM_TLS_GD32 \
10285 || (R_TYPE) == R_ARM_TLS_LDO32 \
10286 || (R_TYPE) == R_ARM_TLS_LDM32 \
10287 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10288 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10289 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10290 || (R_TYPE) == R_ARM_TLS_LE32 \
10291 || (R_TYPE) == R_ARM_TLS_IE32 \
10292 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10293
10294 /* Specific set of relocations for the gnu tls dialect. */
10295 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10296 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10297 || (R_TYPE) == R_ARM_TLS_CALL \
10298 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10299 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10300 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10301
10302 /* Relocate an ARM ELF section. */
10303
10304 static bfd_boolean
10305 elf32_arm_relocate_section (bfd * output_bfd,
10306 struct bfd_link_info * info,
10307 bfd * input_bfd,
10308 asection * input_section,
10309 bfd_byte * contents,
10310 Elf_Internal_Rela * relocs,
10311 Elf_Internal_Sym * local_syms,
10312 asection ** local_sections)
10313 {
10314 Elf_Internal_Shdr *symtab_hdr;
10315 struct elf_link_hash_entry **sym_hashes;
10316 Elf_Internal_Rela *rel;
10317 Elf_Internal_Rela *relend;
10318 const char *name;
10319 struct elf32_arm_link_hash_table * globals;
10320
10321 globals = elf32_arm_hash_table (info);
10322 if (globals == NULL)
10323 return FALSE;
10324
10325 symtab_hdr = & elf_symtab_hdr (input_bfd);
10326 sym_hashes = elf_sym_hashes (input_bfd);
10327
10328 rel = relocs;
10329 relend = relocs + input_section->reloc_count;
10330 for (; rel < relend; rel++)
10331 {
10332 int r_type;
10333 reloc_howto_type * howto;
10334 unsigned long r_symndx;
10335 Elf_Internal_Sym * sym;
10336 asection * sec;
10337 struct elf_link_hash_entry * h;
10338 bfd_vma relocation;
10339 bfd_reloc_status_type r;
10340 arelent bfd_reloc;
10341 char sym_type;
10342 bfd_boolean unresolved_reloc = FALSE;
10343 char *error_message = NULL;
10344
10345 r_symndx = ELF32_R_SYM (rel->r_info);
10346 r_type = ELF32_R_TYPE (rel->r_info);
10347 r_type = arm_real_reloc_type (globals, r_type);
10348
10349 if ( r_type == R_ARM_GNU_VTENTRY
10350 || r_type == R_ARM_GNU_VTINHERIT)
10351 continue;
10352
10353 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10354 howto = bfd_reloc.howto;
10355
10356 h = NULL;
10357 sym = NULL;
10358 sec = NULL;
10359
10360 if (r_symndx < symtab_hdr->sh_info)
10361 {
10362 sym = local_syms + r_symndx;
10363 sym_type = ELF32_ST_TYPE (sym->st_info);
10364 sec = local_sections[r_symndx];
10365
10366 /* An object file might have a reference to a local
10367 undefined symbol. This is a daft object file, but we
10368 should at least do something about it. V4BX & NONE
10369 relocations do not use the symbol and are explicitly
10370 allowed to use the undefined symbol, so allow those.
10371 Likewise for relocations against STN_UNDEF. */
10372 if (r_type != R_ARM_V4BX
10373 && r_type != R_ARM_NONE
10374 && r_symndx != STN_UNDEF
10375 && bfd_is_und_section (sec)
10376 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10377 {
10378 if (!info->callbacks->undefined_symbol
10379 (info, bfd_elf_string_from_elf_section
10380 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10381 input_bfd, input_section,
10382 rel->r_offset, TRUE))
10383 return FALSE;
10384 }
10385
10386 if (globals->use_rel)
10387 {
10388 relocation = (sec->output_section->vma
10389 + sec->output_offset
10390 + sym->st_value);
10391 if (!info->relocatable
10392 && (sec->flags & SEC_MERGE)
10393 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10394 {
10395 asection *msec;
10396 bfd_vma addend, value;
10397
10398 switch (r_type)
10399 {
10400 case R_ARM_MOVW_ABS_NC:
10401 case R_ARM_MOVT_ABS:
10402 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10403 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10404 addend = (addend ^ 0x8000) - 0x8000;
10405 break;
10406
10407 case R_ARM_THM_MOVW_ABS_NC:
10408 case R_ARM_THM_MOVT_ABS:
10409 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10410 << 16;
10411 value |= bfd_get_16 (input_bfd,
10412 contents + rel->r_offset + 2);
10413 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10414 | ((value & 0x04000000) >> 15);
10415 addend = (addend ^ 0x8000) - 0x8000;
10416 break;
10417
10418 default:
10419 if (howto->rightshift
10420 || (howto->src_mask & (howto->src_mask + 1)))
10421 {
10422 (*_bfd_error_handler)
10423 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10424 input_bfd, input_section,
10425 (long) rel->r_offset, howto->name);
10426 return FALSE;
10427 }
10428
10429 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10430
10431 /* Get the (signed) value from the instruction. */
10432 addend = value & howto->src_mask;
10433 if (addend & ((howto->src_mask + 1) >> 1))
10434 {
10435 bfd_signed_vma mask;
10436
10437 mask = -1;
10438 mask &= ~ howto->src_mask;
10439 addend |= mask;
10440 }
10441 break;
10442 }
10443
10444 msec = sec;
10445 addend =
10446 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10447 - relocation;
10448 addend += msec->output_section->vma + msec->output_offset;
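       /* ADDEND is now relative to the relocated location of the merged
          entity, so that RELOCATION + ADDEND gives the entity's address
          in the merged output section.  */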
10449
10450 /* Cases here must match those in the preceding
10451 switch statement. */
10452 switch (r_type)
10453 {
10454 case R_ARM_MOVW_ABS_NC:
10455 case R_ARM_MOVT_ABS:
10456 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10457 | (addend & 0xfff);
10458 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10459 break;
10460
10461 case R_ARM_THM_MOVW_ABS_NC:
10462 case R_ARM_THM_MOVT_ABS:
10463 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10464 | (addend & 0xff) | ((addend & 0x0800) << 15);
10465 bfd_put_16 (input_bfd, value >> 16,
10466 contents + rel->r_offset);
10467 bfd_put_16 (input_bfd, value,
10468 contents + rel->r_offset + 2);
10469 break;
10470
10471 default:
10472 value = (value & ~ howto->dst_mask)
10473 | (addend & howto->dst_mask);
10474 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10475 break;
10476 }
10477 }
10478 }
10479 else
10480 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10481 }
10482 else
10483 {
10484 bfd_boolean warned;
10485
10486 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10487 r_symndx, symtab_hdr, sym_hashes,
10488 h, sec, relocation,
10489 unresolved_reloc, warned);
10490
10491 sym_type = h->type;
10492 }
10493
10494 if (sec != NULL && discarded_section (sec))
10495 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10496 rel, 1, relend, howto, 0, contents);
10497
10498 if (info->relocatable)
10499 {
10500 /* This is a relocatable link. We don't have to change
10501 anything, unless the reloc is against a section symbol,
10502 in which case we have to adjust according to where the
10503 section symbol winds up in the output section. */
10504 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10505 {
10506 if (globals->use_rel)
10507 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10508 howto, (bfd_signed_vma) sec->output_offset);
10509 else
10510 rel->r_addend += sec->output_offset;
10511 }
10512 continue;
10513 }
10514
10515 if (h != NULL)
10516 name = h->root.root.string;
10517 else
10518 {
10519 name = (bfd_elf_string_from_elf_section
10520 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10521 if (name == NULL || *name == '\0')
10522 name = bfd_section_name (input_bfd, sec);
10523 }
10524
10525 if (r_symndx != STN_UNDEF
10526 && r_type != R_ARM_NONE
10527 && (h == NULL
10528 || h->root.type == bfd_link_hash_defined
10529 || h->root.type == bfd_link_hash_defweak)
10530 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10531 {
10532 (*_bfd_error_handler)
10533 ((sym_type == STT_TLS
10534 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10535 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10536 input_bfd,
10537 input_section,
10538 (long) rel->r_offset,
10539 howto->name,
10540 name);
10541 }
10542
10543 /* We call elf32_arm_final_link_relocate unless we're completely
10544 done, i.e., the relaxation produced the final output we want,
10545 and we won't let anybody mess with it. Also, we have to do
10546 addend adjustments in the case of an R_ARM_TLS_GOTDESC relocation,
10547 both in the relaxed and non-relaxed cases. */
10548 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10549 || (IS_ARM_TLS_GNU_RELOC (r_type)
10550 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10551 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10552 & GOT_TLS_GDESC)))
10553 {
10554 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10555 contents, rel, h == NULL);
10556 /* This may have been marked unresolved because it came from
10557 a shared library. But we've just dealt with that. */
10558 unresolved_reloc = 0;
10559 }
10560 else
10561 r = bfd_reloc_continue;
10562
10563 if (r == bfd_reloc_continue)
10564 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10565 input_section, contents, rel,
10566 relocation, info, sec, name, sym_type,
10567 (h ? h->target_internal
10568 : ARM_SYM_BRANCH_TYPE (sym)), h,
10569 &unresolved_reloc, &error_message);
10570
10571 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10572 because such sections are not SEC_ALLOC and thus ld.so will
10573 not process them. */
10574 if (unresolved_reloc
10575 && !((input_section->flags & SEC_DEBUGGING) != 0
10576 && h->def_dynamic)
10577 && _bfd_elf_section_offset (output_bfd, info, input_section,
10578 rel->r_offset) != (bfd_vma) -1)
10579 {
10580 (*_bfd_error_handler)
10581 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10582 input_bfd,
10583 input_section,
10584 (long) rel->r_offset,
10585 howto->name,
10586 h->root.root.string);
10587 return FALSE;
10588 }
10589
10590 if (r != bfd_reloc_ok)
10591 {
10592 switch (r)
10593 {
10594 case bfd_reloc_overflow:
10595 /* If the overflowing reloc was to an undefined symbol,
10596 we have already printed one error message and there
10597 is no point complaining again. */
10598 if ((! h ||
10599 h->root.type != bfd_link_hash_undefined)
10600 && (!((*info->callbacks->reloc_overflow)
10601 (info, (h ? &h->root : NULL), name, howto->name,
10602 (bfd_vma) 0, input_bfd, input_section,
10603 rel->r_offset))))
10604 return FALSE;
10605 break;
10606
10607 case bfd_reloc_undefined:
10608 if (!((*info->callbacks->undefined_symbol)
10609 (info, name, input_bfd, input_section,
10610 rel->r_offset, TRUE)))
10611 return FALSE;
10612 break;
10613
10614 case bfd_reloc_outofrange:
10615 error_message = _("out of range");
10616 goto common_error;
10617
10618 case bfd_reloc_notsupported:
10619 error_message = _("unsupported relocation");
10620 goto common_error;
10621
10622 case bfd_reloc_dangerous:
10623 /* error_message should already be set. */
10624 goto common_error;
10625
10626 default:
10627 error_message = _("unknown error");
10628 /* Fall through. */
10629
10630 common_error:
10631 BFD_ASSERT (error_message != NULL);
10632 if (!((*info->callbacks->reloc_dangerous)
10633 (info, error_message, input_bfd, input_section,
10634 rel->r_offset)))
10635 return FALSE;
10636 break;
10637 }
10638 }
10639 }
10640
10641 return TRUE;
10642 }
10643
10644 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10645 adds the edit to the start of the list. (The list must be built in order of
10646 ascending TINDEX: the function's callers are primarily responsible for
10647 maintaining that condition). */
10648
10649 static void
10650 add_unwind_table_edit (arm_unwind_table_edit **head,
10651 arm_unwind_table_edit **tail,
10652 arm_unwind_edit_type type,
10653 asection *linked_section,
10654 unsigned int tindex)
10655 {
10656 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10657 xmalloc (sizeof (arm_unwind_table_edit));
10658
10659 new_edit->type = type;
10660 new_edit->linked_section = linked_section;
10661 new_edit->index = tindex;
10662
10663 if (tindex > 0)
10664 {
10665 new_edit->next = NULL;
10666
10667 if (*tail)
10668 (*tail)->next = new_edit;
10669
10670 (*tail) = new_edit;
10671
10672 if (!*head)
10673 (*head) = new_edit;
10674 }
10675 else
10676 {
10677 new_edit->next = *head;
10678
10679 if (!*tail)
10680 *tail = new_edit;
10681
10682 *head = new_edit;
10683 }
10684 }
10685
10686 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10687
10688 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10689 static void
10690 adjust_exidx_size (asection *exidx_sec, int adjust)
10691 {
10692 asection *out_sec;
10693
10694 if (!exidx_sec->rawsize)
10695 exidx_sec->rawsize = exidx_sec->size;
10696
10697 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10698 out_sec = exidx_sec->output_section;
10699 /* Adjust size of output section. */
10700 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10701 }
10702
10703 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10704 static void
10705 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
10706 {
10707 struct _arm_elf_section_data *exidx_arm_data;
10708
10709 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10710 add_unwind_table_edit (
10711 &exidx_arm_data->u.exidx.unwind_edit_list,
10712 &exidx_arm_data->u.exidx.unwind_edit_tail,
10713 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10714
10715 adjust_exidx_size (exidx_sec, 8);
10716 }
10717
10718 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10719 made to those tables, such that:
10720
10721 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10722 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10723 codes which have been inlined into the index).
10724
10725 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10726
10727 The edits are applied when the tables are written
10728 (in elf32_arm_write_section). */
10729
10730 bfd_boolean
10731 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10732 unsigned int num_text_sections,
10733 struct bfd_link_info *info,
10734 bfd_boolean merge_exidx_entries)
10735 {
10736 bfd *inp;
10737 unsigned int last_second_word = 0, i;
10738 asection *last_exidx_sec = NULL;
10739 asection *last_text_sec = NULL;
10740 int last_unwind_type = -1;
10741
10742 /* Walk over all EXIDX sections, and create backlinks from the corresponding
10743 text sections. */
10744 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10745 {
10746 asection *sec;
10747
10748 for (sec = inp->sections; sec != NULL; sec = sec->next)
10749 {
10750 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10751 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10752
10753 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10754 continue;
10755
10756 if (elf_sec->linked_to)
10757 {
10758 Elf_Internal_Shdr *linked_hdr
10759 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10760 struct _arm_elf_section_data *linked_sec_arm_data
10761 = get_arm_elf_section_data (linked_hdr->bfd_section);
10762
10763 if (linked_sec_arm_data == NULL)
10764 continue;
10765
10766 /* Link this .ARM.exidx section back from the text section it
10767 describes. */
10768 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10769 }
10770 }
10771 }
10772
10773 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10774 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10775 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10776
10777 for (i = 0; i < num_text_sections; i++)
10778 {
10779 asection *sec = text_section_order[i];
10780 asection *exidx_sec;
10781 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10782 struct _arm_elf_section_data *exidx_arm_data;
10783 bfd_byte *contents = NULL;
10784 int deleted_exidx_bytes = 0;
10785 bfd_vma j;
10786 arm_unwind_table_edit *unwind_edit_head = NULL;
10787 arm_unwind_table_edit *unwind_edit_tail = NULL;
10788 Elf_Internal_Shdr *hdr;
10789 bfd *ibfd;
10790
10791 if (arm_data == NULL)
10792 continue;
10793
10794 exidx_sec = arm_data->u.text.arm_exidx_sec;
10795 if (exidx_sec == NULL)
10796 {
10797 /* Section has no unwind data. */
10798 if (last_unwind_type == 0 || !last_exidx_sec)
10799 continue;
10800
10801 /* Ignore zero sized sections. */
10802 if (sec->size == 0)
10803 continue;
10804
10805 insert_cantunwind_after (last_text_sec, last_exidx_sec);
10806 last_unwind_type = 0;
10807 continue;
10808 }
10809
10810 /* Skip /DISCARD/ sections. */
10811 if (bfd_is_abs_section (exidx_sec->output_section))
10812 continue;
10813
10814 hdr = &elf_section_data (exidx_sec)->this_hdr;
10815 if (hdr->sh_type != SHT_ARM_EXIDX)
10816 continue;
10817
10818 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10819 if (exidx_arm_data == NULL)
10820 continue;
10821
10822 ibfd = exidx_sec->owner;
10823
10824 if (hdr->contents != NULL)
10825 contents = hdr->contents;
10826 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
10827 /* An error? */
10828 continue;
10829
10830 for (j = 0; j < hdr->sh_size; j += 8)
10831 {
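       /* Each .ARM.exidx entry is two words: a prel31 offset to the
          function, and either EXIDX_CANTUNWIND (1), inlined unwind
          opcodes (bit 31 set), or a prel31 pointer into .ARM.extab.  */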
10832 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10833 int unwind_type;
10834 int elide = 0;
10835
10836 /* An EXIDX_CANTUNWIND entry. */
10837 if (second_word == 1)
10838 {
10839 if (last_unwind_type == 0)
10840 elide = 1;
10841 unwind_type = 0;
10842 }
10843 /* Inlined unwinding data. Merge if equal to previous. */
10844 else if ((second_word & 0x80000000) != 0)
10845 {
10846 if (merge_exidx_entries
10847 && last_second_word == second_word && last_unwind_type == 1)
10848 elide = 1;
10849 unwind_type = 1;
10850 last_second_word = second_word;
10851 }
10852 /* Normal table entry. In theory we could merge these too,
10853 but duplicate entries are likely to be much less common. */
10854 else
10855 unwind_type = 2;
10856
10857 if (elide)
10858 {
10859 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10860 DELETE_EXIDX_ENTRY, NULL, j / 8);
10861
10862 deleted_exidx_bytes += 8;
10863 }
10864
10865 last_unwind_type = unwind_type;
10866 }
10867
10868 /* Free contents if we allocated it ourselves. */
10869 if (contents != hdr->contents)
10870 free (contents);
10871
10872 /* Record edits to be applied later (in elf32_arm_write_section). */
10873 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10874 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10875
10876 if (deleted_exidx_bytes > 0)
10877 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
10878
10879 last_exidx_sec = exidx_sec;
10880 last_text_sec = sec;
10881 }
10882
10883 /* Add terminating CANTUNWIND entry. */
10884 if (last_exidx_sec && last_unwind_type != 0)
10885 insert_cantunwind_after (last_text_sec, last_exidx_sec);
10886
10887 return TRUE;
10888 }
10889
10890 static bfd_boolean
10891 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10892 bfd *ibfd, const char *name)
10893 {
10894 asection *sec, *osec;
10895
10896 sec = bfd_get_linker_section (ibfd, name);
10897 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10898 return TRUE;
10899
10900 osec = sec->output_section;
10901 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10902 return TRUE;
10903
10904 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10905 sec->output_offset, sec->size))
10906 return FALSE;
10907
10908 return TRUE;
10909 }
10910
10911 static bfd_boolean
10912 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10913 {
10914 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10915 asection *sec, *osec;
10916
10917 if (globals == NULL)
10918 return FALSE;
10919
10920 /* Invoke the regular ELF backend linker to do all the work. */
10921 if (!bfd_elf_final_link (abfd, info))
10922 return FALSE;
10923
10924 /* Process stub sections (e.g. BE8 encoding, ...). */
10925 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10926 int i;
10927 for (i = 0; i < htab->top_id; i++)
10928 {
10929 sec = htab->stub_group[i].stub_sec;
10930 /* Only process it once, in its link_sec slot. */
10931 if (sec && i == htab->stub_group[i].link_sec->id)
10932 {
10933 osec = sec->output_section;
10934 elf32_arm_write_section (abfd, info, sec, sec->contents);
10935 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10936 sec->output_offset, sec->size))
10937 return FALSE;
10938 }
10939 }
10940
10941 /* Write out any glue sections now that we have created all the
10942 stubs. */
10943 if (globals->bfd_of_glue_owner != NULL)
10944 {
10945 if (! elf32_arm_output_glue_section (info, abfd,
10946 globals->bfd_of_glue_owner,
10947 ARM2THUMB_GLUE_SECTION_NAME))
10948 return FALSE;
10949
10950 if (! elf32_arm_output_glue_section (info, abfd,
10951 globals->bfd_of_glue_owner,
10952 THUMB2ARM_GLUE_SECTION_NAME))
10953 return FALSE;
10954
10955 if (! elf32_arm_output_glue_section (info, abfd,
10956 globals->bfd_of_glue_owner,
10957 VFP11_ERRATUM_VENEER_SECTION_NAME))
10958 return FALSE;
10959
10960 if (! elf32_arm_output_glue_section (info, abfd,
10961 globals->bfd_of_glue_owner,
10962 ARM_BX_GLUE_SECTION_NAME))
10963 return FALSE;
10964 }
10965
10966 return TRUE;
10967 }
10968
10969 /* Return a best guess for the machine number based on the attributes. */
10970
10971 static unsigned int
10972 bfd_arm_get_mach_from_attributes (bfd * abfd)
10973 {
10974 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10975
10976 switch (arch)
10977 {
10978 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10979 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10980 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10981
10982 case TAG_CPU_ARCH_V5TE:
10983 {
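	/* iWMMXt cores report the ARMv5TE architecture, so the CPU name
	   attribute is needed to tell them apart from plain v5TE parts.  */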
10984 char * name;
10985
10986 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10987 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10988
10989 if (name)
10990 {
10991 if (strcmp (name, "IWMMXT2") == 0)
10992 return bfd_mach_arm_iWMMXt2;
10993
10994 if (strcmp (name, "IWMMXT") == 0)
10995 return bfd_mach_arm_iWMMXt;
10996 }
10997
10998 return bfd_mach_arm_5TE;
10999 }
11000
11001 default:
11002 return bfd_mach_arm_unknown;
11003 }
11004 }
11005
11006 /* Set the right machine number. */
11007
11008 static bfd_boolean
11009 elf32_arm_object_p (bfd *abfd)
11010 {
11011 unsigned int mach;
11012
11013 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11014
11015 if (mach == bfd_mach_arm_unknown)
11016 {
11017 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11018 mach = bfd_mach_arm_ep9312;
11019 else
11020 mach = bfd_arm_get_mach_from_attributes (abfd);
11021 }
11022
11023 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11024 return TRUE;
11025 }
11026
11027 /* Function to keep ARM specific flags in the ELF header. */
11028
11029 static bfd_boolean
11030 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11031 {
11032 if (elf_flags_init (abfd)
11033 && elf_elfheader (abfd)->e_flags != flags)
11034 {
11035 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11036 {
11037 if (flags & EF_ARM_INTERWORK)
11038 (*_bfd_error_handler)
11039 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11040 abfd);
11041 else
11042 _bfd_error_handler
11043 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11044 abfd);
11045 }
11046 }
11047 else
11048 {
11049 elf_elfheader (abfd)->e_flags = flags;
11050 elf_flags_init (abfd) = TRUE;
11051 }
11052
11053 return TRUE;
11054 }
11055
11056 /* Copy backend specific data from one object module to another. */
11057
11058 static bfd_boolean
11059 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11060 {
11061 flagword in_flags;
11062 flagword out_flags;
11063
11064 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11065 return TRUE;
11066
11067 in_flags = elf_elfheader (ibfd)->e_flags;
11068 out_flags = elf_elfheader (obfd)->e_flags;
11069
11070 if (elf_flags_init (obfd)
11071 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11072 && in_flags != out_flags)
11073 {
11074 /* Cannot mix APCS26 and APCS32 code. */
11075 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11076 return FALSE;
11077
11078 /* Cannot mix float APCS and non-float APCS code. */
11079 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11080 return FALSE;
11081
11082 /* If the src and dest have different interworking flags
11083 then turn off the interworking bit. */
11084 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11085 {
11086 if (out_flags & EF_ARM_INTERWORK)
11087 _bfd_error_handler
11088 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11089 obfd, ibfd);
11090
11091 in_flags &= ~EF_ARM_INTERWORK;
11092 }
11093
11094 /* Likewise for PIC, though don't warn for this case. */
11095 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11096 in_flags &= ~EF_ARM_PIC;
11097 }
11098
11099 elf_elfheader (obfd)->e_flags = in_flags;
11100 elf_flags_init (obfd) = TRUE;
11101
11102 /* Also copy the EI_OSABI field. */
11103 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11104 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11105
11106 /* Copy object attributes. */
11107 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11108
11109 return TRUE;
11110 }
11111
11112 /* Values for Tag_ABI_PCS_R9_use. */
11113 enum
11114 {
11115 AEABI_R9_V6,
11116 AEABI_R9_SB,
11117 AEABI_R9_TLS,
11118 AEABI_R9_unused
11119 };
11120
11121 /* Values for Tag_ABI_PCS_RW_data. */
11122 enum
11123 {
11124 AEABI_PCS_RW_data_absolute,
11125 AEABI_PCS_RW_data_PCrel,
11126 AEABI_PCS_RW_data_SBrel,
11127 AEABI_PCS_RW_data_unused
11128 };
11129
11130 /* Values for Tag_ABI_enum_size. */
11131 enum
11132 {
11133 AEABI_enum_unused,
11134 AEABI_enum_short,
11135 AEABI_enum_wide,
11136 AEABI_enum_forced_wide
11137 };
11138
11139 /* Determine whether an object attribute tag takes an integer, a
11140 string or both. */
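/* Tags with no rule of their own follow the generic convention: values
   below 32 take an integer, and from 32 upwards an even tag number
   takes an integer while an odd tag number takes a string.  */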
11141
11142 static int
11143 elf32_arm_obj_attrs_arg_type (int tag)
11144 {
11145 if (tag == Tag_compatibility)
11146 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11147 else if (tag == Tag_nodefaults)
11148 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11149 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11150 return ATTR_TYPE_FLAG_STR_VAL;
11151 else if (tag < 32)
11152 return ATTR_TYPE_FLAG_INT_VAL;
11153 else
11154 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11155 }
11156
11157 /* The ABI defines that Tag_conformance should be emitted first, and that
11158 Tag_nodefaults should be second (if either is defined). This sets those
11159 two positions, and bumps up the position of all the remaining tags to
11160 compensate. */
11161 static int
11162 elf32_arm_obj_attrs_order (int num)
11163 {
11164 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11165 return Tag_conformance;
11166 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11167 return Tag_nodefaults;
11168 if ((num - 2) < Tag_nodefaults)
11169 return num - 2;
11170 if ((num - 1) < Tag_conformance)
11171 return num - 1;
11172 return num;
11173 }
11174
11175 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11176 static bfd_boolean
11177 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11178 {
11179 if ((tag & 127) < 64)
11180 {
11181 _bfd_error_handler
11182 (_("%B: Unknown mandatory EABI object attribute %d"),
11183 abfd, tag);
11184 bfd_set_error (bfd_error_bad_value);
11185 return FALSE;
11186 }
11187 else
11188 {
11189 _bfd_error_handler
11190 (_("Warning: %B: Unknown EABI object attribute %d"),
11191 abfd, tag);
11192 return TRUE;
11193 }
11194 }
11195
11196 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11197 Returns -1 if no architecture could be read. */
11198
11199 static int
11200 get_secondary_compatible_arch (bfd *abfd)
11201 {
11202 obj_attribute *attr =
11203 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11204
11205 /* Note: the tag and its argument below are uleb128 values, though
11206 currently-defined values fit in one byte for each. */
11207 if (attr->s
11208 && attr->s[0] == Tag_CPU_arch
11209 && (attr->s[1] & 128) != 128
11210 && attr->s[2] == 0)
11211 return attr->s[1];
11212
11213 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11214 return -1;
11215 }
11216
11217 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11218 The tag is removed if ARCH is -1. */
11219
11220 static void
11221 set_secondary_compatible_arch (bfd *abfd, int arch)
11222 {
11223 obj_attribute *attr =
11224 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11225
11226 if (arch == -1)
11227 {
11228 attr->s = NULL;
11229 return;
11230 }
11231
11232 /* Note: the tag and its argument below are uleb128 values, though
11233 currently-defined values fit in one byte for each. */
11234 if (!attr->s)
11235 attr->s = (char *) bfd_alloc (abfd, 3);
11236 attr->s[0] = Tag_CPU_arch;
11237 attr->s[1] = arch;
11238 attr->s[2] = '\0';
11239 }
11240
11241 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
11242 into account. */
11243
11244 static int
11245 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11246 int newtag, int secondary_compat)
11247 {
11248 #define T(X) TAG_CPU_ARCH_##X
11249 int tagl, tagh, result;
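  /* Each table below is indexed by the smaller of the two architecture
     tags and gives the merged architecture when the larger tag is the
     one the table is named after (V6T2 onwards); -1 marks pairs that
     cannot be merged.  comb[] collects the tables in tag order so that
     comb[tagh - T(V6T2)][tagl] performs the lookup.  */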
11250 const int v6t2[] =
11251 {
11252 T(V6T2), /* PRE_V4. */
11253 T(V6T2), /* V4. */
11254 T(V6T2), /* V4T. */
11255 T(V6T2), /* V5T. */
11256 T(V6T2), /* V5TE. */
11257 T(V6T2), /* V5TEJ. */
11258 T(V6T2), /* V6. */
11259 T(V7), /* V6KZ. */
11260 T(V6T2) /* V6T2. */
11261 };
11262 const int v6k[] =
11263 {
11264 T(V6K), /* PRE_V4. */
11265 T(V6K), /* V4. */
11266 T(V6K), /* V4T. */
11267 T(V6K), /* V5T. */
11268 T(V6K), /* V5TE. */
11269 T(V6K), /* V5TEJ. */
11270 T(V6K), /* V6. */
11271 T(V6KZ), /* V6KZ. */
11272 T(V7), /* V6T2. */
11273 T(V6K) /* V6K. */
11274 };
11275 const int v7[] =
11276 {
11277 T(V7), /* PRE_V4. */
11278 T(V7), /* V4. */
11279 T(V7), /* V4T. */
11280 T(V7), /* V5T. */
11281 T(V7), /* V5TE. */
11282 T(V7), /* V5TEJ. */
11283 T(V7), /* V6. */
11284 T(V7), /* V6KZ. */
11285 T(V7), /* V6T2. */
11286 T(V7), /* V6K. */
11287 T(V7) /* V7. */
11288 };
11289 const int v6_m[] =
11290 {
11291 -1, /* PRE_V4. */
11292 -1, /* V4. */
11293 T(V6K), /* V4T. */
11294 T(V6K), /* V5T. */
11295 T(V6K), /* V5TE. */
11296 T(V6K), /* V5TEJ. */
11297 T(V6K), /* V6. */
11298 T(V6KZ), /* V6KZ. */
11299 T(V7), /* V6T2. */
11300 T(V6K), /* V6K. */
11301 T(V7), /* V7. */
11302 T(V6_M) /* V6_M. */
11303 };
11304 const int v6s_m[] =
11305 {
11306 -1, /* PRE_V4. */
11307 -1, /* V4. */
11308 T(V6K), /* V4T. */
11309 T(V6K), /* V5T. */
11310 T(V6K), /* V5TE. */
11311 T(V6K), /* V5TEJ. */
11312 T(V6K), /* V6. */
11313 T(V6KZ), /* V6KZ. */
11314 T(V7), /* V6T2. */
11315 T(V6K), /* V6K. */
11316 T(V7), /* V7. */
11317 T(V6S_M), /* V6_M. */
11318 T(V6S_M) /* V6S_M. */
11319 };
11320 const int v7e_m[] =
11321 {
11322 -1, /* PRE_V4. */
11323 -1, /* V4. */
11324 T(V7E_M), /* V4T. */
11325 T(V7E_M), /* V5T. */
11326 T(V7E_M), /* V5TE. */
11327 T(V7E_M), /* V5TEJ. */
11328 T(V7E_M), /* V6. */
11329 T(V7E_M), /* V6KZ. */
11330 T(V7E_M), /* V6T2. */
11331 T(V7E_M), /* V6K. */
11332 T(V7E_M), /* V7. */
11333 T(V7E_M), /* V6_M. */
11334 T(V7E_M), /* V6S_M. */
11335 T(V7E_M) /* V7E_M. */
11336 };
11337 const int v8[] =
11338 {
11339 T(V8), /* PRE_V4. */
11340 T(V8), /* V4. */
11341 T(V8), /* V4T. */
11342 T(V8), /* V5T. */
11343 T(V8), /* V5TE. */
11344 T(V8), /* V5TEJ. */
11345 T(V8), /* V6. */
11346 T(V8), /* V6KZ. */
11347 T(V8), /* V6T2. */
11348 T(V8), /* V6K. */
11349 T(V8), /* V7. */
11350 T(V8), /* V6_M. */
11351 T(V8), /* V6S_M. */
11352 T(V8), /* V7E_M. */
11353 T(V8) /* V8. */
11354 };
11355 const int v4t_plus_v6_m[] =
11356 {
11357 -1, /* PRE_V4. */
11358 -1, /* V4. */
11359 T(V4T), /* V4T. */
11360 T(V5T), /* V5T. */
11361 T(V5TE), /* V5TE. */
11362 T(V5TEJ), /* V5TEJ. */
11363 T(V6), /* V6. */
11364 T(V6KZ), /* V6KZ. */
11365 T(V6T2), /* V6T2. */
11366 T(V6K), /* V6K. */
11367 T(V7), /* V7. */
11368 T(V6_M), /* V6_M. */
11369 T(V6S_M), /* V6S_M. */
11370 T(V7E_M), /* V7E_M. */
11371 T(V8), /* V8. */
11372 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11373 };
11374 const int *comb[] =
11375 {
11376 v6t2,
11377 v6k,
11378 v7,
11379 v6_m,
11380 v6s_m,
11381 v7e_m,
11382 v8,
11383 /* Pseudo-architecture. */
11384 v4t_plus_v6_m
11385 };
11386
11387 /* Check we've not got a higher architecture than we know about. */
11388
11389 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11390 {
11391 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11392 return -1;
11393 }
11394
11395 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11396
11397 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11398 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11399 oldtag = T(V4T_PLUS_V6_M);
11400
11401 /* And override the new tag if we have a Tag_also_compatible_with on the
11402 input. */
11403
11404 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11405 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11406 newtag = T(V4T_PLUS_V6_M);
11407
11408 tagl = (oldtag < newtag) ? oldtag : newtag;
11409 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11410
11411 /* Architectures before V6KZ add features monotonically. */
11412 if (tagh <= TAG_CPU_ARCH_V6KZ)
11413 return result;
11414
11415 result = comb[tagh - T(V6T2)][tagl];
11416
11417 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11418 as the canonical version. */
11419 if (result == T(V4T_PLUS_V6_M))
11420 {
11421 result = T(V4T);
11422 *secondary_compat_out = T(V6_M);
11423 }
11424 else
11425 *secondary_compat_out = -1;
11426
11427 if (result == -1)
11428 {
11429 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11430 ibfd, oldtag, newtag);
11431 return -1;
11432 }
11433
11434 return result;
11435 #undef T
11436 }
11437
11438 /* Query attributes object to see if integer divide instructions may be
11439 present in an object. */
11440 static bfd_boolean
11441 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11442 {
11443 int arch = attr[Tag_CPU_arch].i;
11444 int profile = attr[Tag_CPU_arch_profile].i;
11445
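  /* Tag_DIV_use: 0 means divide may be used when the architecture and
     profile provide it, 1 means the producer asked for divide not to be
     used, 2 (and unknown values) means divide is explicitly allowed.  */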
11446 switch (attr[Tag_DIV_use].i)
11447 {
11448 case 0:
11449       /* Integer divide allowed if the instruction is contained in the architecture.  */
11450 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11451 return TRUE;
11452 else if (arch >= TAG_CPU_ARCH_V7E_M)
11453 return TRUE;
11454 else
11455 return FALSE;
11456
11457 case 1:
11458 /* Integer divide explicitly prohibited. */
11459 return FALSE;
11460
11461 default:
11462 /* Unrecognised case - treat as allowing divide everywhere. */
11463 case 2:
11464 /* Integer divide allowed in ARM state. */
11465 return TRUE;
11466 }
11467 }
11468
11469 /* Query attributes object to see if integer divide instructions are
11470 forbidden to be in the object. This is not the inverse of
11471 elf32_arm_attributes_accept_div. */
11472 static bfd_boolean
11473 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11474 {
11475 return attr[Tag_DIV_use].i == 1;
11476 }
11477
11478 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11479 are conflicting attributes. */
11480
11481 static bfd_boolean
11482 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11483 {
11484 obj_attribute *in_attr;
11485 obj_attribute *out_attr;
11486 /* Some tags have 0 = don't care, 1 = strong requirement,
11487 2 = weak requirement. */
11488 static const int order_021[3] = {0, 2, 1};
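  /* order_021[V] gives the rank of value V in that sequence (0 < 2 < 1);
     for the tags that use it, the merge keeps whichever of the two
     values ranks higher.  */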
11489 int i;
11490 bfd_boolean result = TRUE;
11491
11492 /* Skip the linker stubs file. This preserves previous behavior
11493 of accepting unknown attributes in the first input file - but
11494 is that a bug? */
11495 if (ibfd->flags & BFD_LINKER_CREATED)
11496 return TRUE;
11497
11498 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11499 {
11500 /* This is the first object. Copy the attributes. */
11501 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11502
11503 out_attr = elf_known_obj_attributes_proc (obfd);
11504
11505 /* Use the Tag_null value to indicate the attributes have been
11506 initialized. */
11507 out_attr[0].i = 1;
11508
11509 /* We do not output objects with Tag_MPextension_use_legacy - we move
11510 the attribute's value to Tag_MPextension_use. */
11511 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11512 {
11513 if (out_attr[Tag_MPextension_use].i != 0
11514 && out_attr[Tag_MPextension_use_legacy].i
11515 != out_attr[Tag_MPextension_use].i)
11516 {
11517 _bfd_error_handler
11518 (_("Error: %B has both the current and legacy "
11519 "Tag_MPextension_use attributes"), ibfd);
11520 result = FALSE;
11521 }
11522
11523 out_attr[Tag_MPextension_use] =
11524 out_attr[Tag_MPextension_use_legacy];
11525 out_attr[Tag_MPextension_use_legacy].type = 0;
11526 out_attr[Tag_MPextension_use_legacy].i = 0;
11527 }
11528
11529 return result;
11530 }
11531
11532 in_attr = elf_known_obj_attributes_proc (ibfd);
11533 out_attr = elf_known_obj_attributes_proc (obfd);
11534 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11535 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11536 {
11537 /* Ignore mismatches if the object doesn't use floating point. */
11538 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11539 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11540 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11541 {
11542 _bfd_error_handler
11543 (_("error: %B uses VFP register arguments, %B does not"),
11544 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11545 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11546 result = FALSE;
11547 }
11548 }
11549
11550 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11551 {
11552 /* Merge this attribute with existing attributes. */
11553 switch (i)
11554 {
11555 case Tag_CPU_raw_name:
11556 case Tag_CPU_name:
11557 /* These are merged after Tag_CPU_arch. */
11558 break;
11559
11560 case Tag_ABI_optimization_goals:
11561 case Tag_ABI_FP_optimization_goals:
11562 /* Use the first value seen. */
11563 break;
11564
11565 case Tag_CPU_arch:
11566 {
11567 int secondary_compat = -1, secondary_compat_out = -1;
11568 unsigned int saved_out_attr = out_attr[i].i;
11569 static const char *name_table[] = {
11570 /* These aren't real CPU names, but we can't guess
11571 that from the architecture version alone. */
11572 "Pre v4",
11573 "ARM v4",
11574 "ARM v4T",
11575 "ARM v5T",
11576 "ARM v5TE",
11577 "ARM v5TEJ",
11578 "ARM v6",
11579 "ARM v6KZ",
11580 "ARM v6T2",
11581 "ARM v6K",
11582 "ARM v7",
11583 "ARM v6-M",
11584 "ARM v6S-M",
11585 "ARM v8"
11586 };
11587
11588 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11589 secondary_compat = get_secondary_compatible_arch (ibfd);
11590 secondary_compat_out = get_secondary_compatible_arch (obfd);
11591 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11592 &secondary_compat_out,
11593 in_attr[i].i,
11594 secondary_compat);
11595 set_secondary_compatible_arch (obfd, secondary_compat_out);
11596
11597 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11598 if (out_attr[i].i == saved_out_attr)
11599 ; /* Leave the names alone. */
11600 else if (out_attr[i].i == in_attr[i].i)
11601 {
11602 /* The output architecture has been changed to match the
11603 input architecture. Use the input names. */
11604 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11605 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11606 : NULL;
11607 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11608 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11609 : NULL;
11610 }
11611 else
11612 {
11613 out_attr[Tag_CPU_name].s = NULL;
11614 out_attr[Tag_CPU_raw_name].s = NULL;
11615 }
11616
11617 /* If we still don't have a value for Tag_CPU_name,
11618 make one up now. Tag_CPU_raw_name remains blank. */
11619 if (out_attr[Tag_CPU_name].s == NULL
11620 && out_attr[i].i < ARRAY_SIZE (name_table))
11621 out_attr[Tag_CPU_name].s =
11622 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11623 }
11624 break;
11625
11626 case Tag_ARM_ISA_use:
11627 case Tag_THUMB_ISA_use:
11628 case Tag_WMMX_arch:
11629 case Tag_Advanced_SIMD_arch:
11630 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11631 case Tag_ABI_FP_rounding:
11632 case Tag_ABI_FP_exceptions:
11633 case Tag_ABI_FP_user_exceptions:
11634 case Tag_ABI_FP_number_model:
11635 case Tag_FP_HP_extension:
11636 case Tag_CPU_unaligned_access:
11637 case Tag_T2EE_use:
11638 case Tag_MPextension_use:
11639 /* Use the largest value specified. */
11640 if (in_attr[i].i > out_attr[i].i)
11641 out_attr[i].i = in_attr[i].i;
11642 break;
11643
11644 case Tag_ABI_align_preserved:
11645 case Tag_ABI_PCS_RO_data:
11646 /* Use the smallest value specified. */
11647 if (in_attr[i].i < out_attr[i].i)
11648 out_attr[i].i = in_attr[i].i;
11649 break;
11650
11651 case Tag_ABI_align_needed:
11652 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11653 && (in_attr[Tag_ABI_align_preserved].i == 0
11654 || out_attr[Tag_ABI_align_preserved].i == 0))
11655 {
11656 /* This error message should be enabled once all non-conformant
11657 binaries in the toolchain have had the attributes set
11658 properly.
11659 _bfd_error_handler
11660 (_("error: %B: 8-byte data alignment conflicts with %B"),
11661 obfd, ibfd);
11662 result = FALSE; */
11663 }
11664 /* Fall through. */
11665 case Tag_ABI_FP_denormal:
11666 case Tag_ABI_PCS_GOT_use:
11667 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11668 value if greater than 2 (for future-proofing). */
11669 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11670 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11671 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11672 out_attr[i].i = in_attr[i].i;
11673 break;
11674
11675 case Tag_Virtualization_use:
11676 /* The virtualization tag effectively stores two bits of
11677 information: the intended use of TrustZone (in bit 0), and the
11678 intended use of Virtualization (in bit 1). */
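	  /* Two different non-zero values in range merge to 3, the union
	     of both capabilities; larger, unknown values cannot safely be
	     combined.  */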
11679 if (out_attr[i].i == 0)
11680 out_attr[i].i = in_attr[i].i;
11681 else if (in_attr[i].i != 0
11682 && in_attr[i].i != out_attr[i].i)
11683 {
11684 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11685 out_attr[i].i = 3;
11686 else
11687 {
11688 _bfd_error_handler
11689 (_("error: %B: unable to merge virtualization attributes "
11690 "with %B"),
11691 obfd, ibfd);
11692 result = FALSE;
11693 }
11694 }
11695 break;
11696
11697 case Tag_CPU_arch_profile:
11698 if (out_attr[i].i != in_attr[i].i)
11699 {
11700 /* 0 will merge with anything.
11701 'A' and 'S' merge to 'A'.
11702 'R' and 'S' merge to 'R'.
11703 'M' and 'A|R|S' is an error. */
11704 if (out_attr[i].i == 0
11705 || (out_attr[i].i == 'S'
11706 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11707 out_attr[i].i = in_attr[i].i;
11708 else if (in_attr[i].i == 0
11709 || (in_attr[i].i == 'S'
11710 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11711 ; /* Do nothing. */
11712 else
11713 {
11714 _bfd_error_handler
11715 (_("error: %B: Conflicting architecture profiles %c/%c"),
11716 ibfd,
11717 in_attr[i].i ? in_attr[i].i : '0',
11718 out_attr[i].i ? out_attr[i].i : '0');
11719 result = FALSE;
11720 }
11721 }
11722 break;
11723 case Tag_FP_arch:
11724 {
11725 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11726 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11727 when it's 0. It might mean absence of FP hardware if
11728 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11729
11730 #define VFP_VERSION_COUNT 8
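	    /* vfp_versions[N] gives the VFP architecture version and the
	       number of D registers implied by Tag_FP_arch value N; the
	       merge below chooses the tag whose version and register
	       count form the superset of the two inputs.  */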
11731 static const struct
11732 {
11733 int ver;
11734 int regs;
11735 } vfp_versions[VFP_VERSION_COUNT] =
11736 {
11737 {0, 0},
11738 {1, 16},
11739 {2, 16},
11740 {3, 32},
11741 {3, 16},
11742 {4, 32},
11743 {4, 16},
11744 {8, 32}
11745 };
11746 int ver;
11747 int regs;
11748 int newval;
11749
11750 /* If the output has no requirement about FP hardware,
11751 follow the requirement of the input. */
11752 if (out_attr[i].i == 0)
11753 {
11754 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11755 out_attr[i].i = in_attr[i].i;
11756 out_attr[Tag_ABI_HardFP_use].i
11757 = in_attr[Tag_ABI_HardFP_use].i;
11758 break;
11759 }
11760 /* If the input has no requirement about FP hardware, do
11761 nothing. */
11762 else if (in_attr[i].i == 0)
11763 {
11764 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11765 break;
11766 }
11767
11768 /* Both the input and the output have nonzero Tag_FP_arch.
11769 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11770
11771 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11772 do nothing. */
11773 if (in_attr[Tag_ABI_HardFP_use].i == 0
11774 && out_attr[Tag_ABI_HardFP_use].i == 0)
11775 ;
11776 /* If the input and the output have different Tag_ABI_HardFP_use,
11777 the combination of them is 3 (SP & DP). */
11778 else if (in_attr[Tag_ABI_HardFP_use].i
11779 != out_attr[Tag_ABI_HardFP_use].i)
11780 out_attr[Tag_ABI_HardFP_use].i = 3;
11781
11782 /* Now we can handle Tag_FP_arch. */
11783
11784 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
11785 pick the biggest. */
11786 if (in_attr[i].i >= VFP_VERSION_COUNT
11787 && in_attr[i].i > out_attr[i].i)
11788 {
11789 out_attr[i] = in_attr[i];
11790 break;
11791 }
11792 /* The output uses the superset of input features
11793 (ISA version) and registers. */
11794 ver = vfp_versions[in_attr[i].i].ver;
11795 if (ver < vfp_versions[out_attr[i].i].ver)
11796 ver = vfp_versions[out_attr[i].i].ver;
11797 regs = vfp_versions[in_attr[i].i].regs;
11798 if (regs < vfp_versions[out_attr[i].i].regs)
11799 regs = vfp_versions[out_attr[i].i].regs;
11800 	    /* This assumes all possible supersets are also valid
11801 	       options.  */
11802 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
11803 {
11804 if (regs == vfp_versions[newval].regs
11805 && ver == vfp_versions[newval].ver)
11806 break;
11807 }
11808 out_attr[i].i = newval;
11809 }
11810 break;
11811 case Tag_PCS_config:
11812 if (out_attr[i].i == 0)
11813 out_attr[i].i = in_attr[i].i;
11814 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11815 {
11816 /* It's sometimes ok to mix different configs, so this is only
11817 a warning. */
11818 _bfd_error_handler
11819 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11820 }
11821 break;
11822 case Tag_ABI_PCS_R9_use:
11823 if (in_attr[i].i != out_attr[i].i
11824 && out_attr[i].i != AEABI_R9_unused
11825 && in_attr[i].i != AEABI_R9_unused)
11826 {
11827 _bfd_error_handler
11828 (_("error: %B: Conflicting use of R9"), ibfd);
11829 result = FALSE;
11830 }
11831 if (out_attr[i].i == AEABI_R9_unused)
11832 out_attr[i].i = in_attr[i].i;
11833 break;
11834 case Tag_ABI_PCS_RW_data:
11835 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11836 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11837 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11838 {
11839 _bfd_error_handler
11840 (_("error: %B: SB relative addressing conflicts with use of R9"),
11841 ibfd);
11842 result = FALSE;
11843 }
11844 /* Use the smallest value specified. */
11845 if (in_attr[i].i < out_attr[i].i)
11846 out_attr[i].i = in_attr[i].i;
11847 break;
11848 case Tag_ABI_PCS_wchar_t:
11849 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11850 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11851 {
11852 _bfd_error_handler
11853 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11854 ibfd, in_attr[i].i, out_attr[i].i);
11855 }
11856 else if (in_attr[i].i && !out_attr[i].i)
11857 out_attr[i].i = in_attr[i].i;
11858 break;
11859 case Tag_ABI_enum_size:
11860 if (in_attr[i].i != AEABI_enum_unused)
11861 {
11862 if (out_attr[i].i == AEABI_enum_unused
11863 || out_attr[i].i == AEABI_enum_forced_wide)
11864 {
11865 /* The existing object is compatible with anything.
11866 Use whatever requirements the new object has. */
11867 out_attr[i].i = in_attr[i].i;
11868 }
11869 else if (in_attr[i].i != AEABI_enum_forced_wide
11870 && out_attr[i].i != in_attr[i].i
11871 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11872 {
11873 static const char *aeabi_enum_names[] =
11874 { "", "variable-size", "32-bit", "" };
11875 const char *in_name =
11876 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11877 ? aeabi_enum_names[in_attr[i].i]
11878 : "<unknown>";
11879 const char *out_name =
11880 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11881 ? aeabi_enum_names[out_attr[i].i]
11882 : "<unknown>";
11883 _bfd_error_handler
11884 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11885 ibfd, in_name, out_name);
11886 }
11887 }
11888 break;
11889 case Tag_ABI_VFP_args:
11890 	  /* Already done.  */
11891 break;
11892 case Tag_ABI_WMMX_args:
11893 if (in_attr[i].i != out_attr[i].i)
11894 {
11895 _bfd_error_handler
11896 (_("error: %B uses iWMMXt register arguments, %B does not"),
11897 ibfd, obfd);
11898 result = FALSE;
11899 }
11900 break;
11901 case Tag_compatibility:
11902 /* Merged in target-independent code. */
11903 break;
11904 case Tag_ABI_HardFP_use:
11905 /* This is handled along with Tag_FP_arch. */
11906 break;
11907 case Tag_ABI_FP_16bit_format:
11908 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11909 {
11910 if (in_attr[i].i != out_attr[i].i)
11911 {
11912 _bfd_error_handler
11913 (_("error: fp16 format mismatch between %B and %B"),
11914 ibfd, obfd);
11915 result = FALSE;
11916 }
11917 }
11918 if (in_attr[i].i != 0)
11919 out_attr[i].i = in_attr[i].i;
11920 break;
11921
11922 case Tag_DIV_use:
11923 /* A value of zero on input means that the divide instruction may
11924 be used if available in the base architecture as specified via
11925 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11926 the user did not want divide instructions. A value of 2
11927 explicitly means that divide instructions were allowed in ARM
11928 and Thumb state. */
11929 if (in_attr[i].i == out_attr[i].i)
11930 /* Do nothing. */ ;
11931 else if (elf32_arm_attributes_forbid_div (in_attr)
11932 && !elf32_arm_attributes_accept_div (out_attr))
11933 out_attr[i].i = 1;
11934 else if (elf32_arm_attributes_forbid_div (out_attr)
11935 && elf32_arm_attributes_accept_div (in_attr))
11936 out_attr[i].i = in_attr[i].i;
11937 else if (in_attr[i].i == 2)
11938 out_attr[i].i = in_attr[i].i;
11939 break;
11940
11941 case Tag_MPextension_use_legacy:
11942 /* We don't output objects with Tag_MPextension_use_legacy - we
11943 move the value to Tag_MPextension_use. */
11944 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11945 {
11946 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
11947 {
11948 _bfd_error_handler
11949 (_("%B has has both the current and legacy "
11950 "Tag_MPextension_use attributes"),
11951 ibfd);
11952 result = FALSE;
11953 }
11954 }
11955
11956 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11957 out_attr[Tag_MPextension_use] = in_attr[i];
11958
11959 break;
11960
11961 case Tag_nodefaults:
11962 /* This tag is set if it exists, but the value is unused (and is
11963 typically zero). We don't actually need to do anything here -
11964 the merge happens automatically when the type flags are merged
11965 below. */
11966 break;
11967 case Tag_also_compatible_with:
11968 /* Already done in Tag_CPU_arch. */
11969 break;
11970 case Tag_conformance:
11971 /* Keep the attribute if it matches. Throw it away otherwise.
11972 No attribute means no claim to conform. */
11973 if (!in_attr[i].s || !out_attr[i].s
11974 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11975 out_attr[i].s = NULL;
11976 break;
11977
11978 default:
11979 result
11980 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11981 }
11982
11983 /* If out_attr was copied from in_attr then it won't have a type yet. */
11984 if (in_attr[i].type && !out_attr[i].type)
11985 out_attr[i].type = in_attr[i].type;
11986 }
11987
11988 /* Merge Tag_compatibility attributes and any common GNU ones. */
11989 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11990 return FALSE;
11991
11992 /* Check for any attributes not known on ARM. */
11993 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
11994
11995 return result;
11996 }
11997
11998
11999 /* Return TRUE if the two EABI versions are incompatible. */
12000
12001 static bfd_boolean
12002 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12003 {
12004 /* v4 and v5 are the same spec before and after it was released,
12005 so allow mixing them. */
12006 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12007 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12008 return TRUE;
12009
12010 return (iver == over);
12011 }
12012
12013 /* Merge backend specific data from an object file to the output
12014 object file when linking. */
12015
12016 static bfd_boolean
12017 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12018
12019 /* Display the flags field. */
12020
12021 static bfd_boolean
12022 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12023 {
12024 FILE * file = (FILE *) ptr;
12025 unsigned long flags;
12026
12027 BFD_ASSERT (abfd != NULL && ptr != NULL);
12028
12029 /* Print normal ELF private data. */
12030 _bfd_elf_print_private_bfd_data (abfd, ptr);
12031
12032 flags = elf_elfheader (abfd)->e_flags;
12033 /* Ignore init flag - it may not be set, despite the flags field
12034 containing valid data. */
12035
12036 /* xgettext:c-format */
12037 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12038
12039 switch (EF_ARM_EABI_VERSION (flags))
12040 {
12041 case EF_ARM_EABI_UNKNOWN:
12042 /* The following flag bits are GNU extensions and not part of the
12043 official ARM ELF extended ABI. Hence they are only decoded if
12044 the EABI version is not set. */
12045 if (flags & EF_ARM_INTERWORK)
12046 fprintf (file, _(" [interworking enabled]"));
12047
12048 if (flags & EF_ARM_APCS_26)
12049 fprintf (file, " [APCS-26]");
12050 else
12051 fprintf (file, " [APCS-32]");
12052
12053 if (flags & EF_ARM_VFP_FLOAT)
12054 fprintf (file, _(" [VFP float format]"));
12055 else if (flags & EF_ARM_MAVERICK_FLOAT)
12056 fprintf (file, _(" [Maverick float format]"));
12057 else
12058 fprintf (file, _(" [FPA float format]"));
12059
12060 if (flags & EF_ARM_APCS_FLOAT)
12061 fprintf (file, _(" [floats passed in float registers]"));
12062
12063 if (flags & EF_ARM_PIC)
12064 fprintf (file, _(" [position independent]"));
12065
12066 if (flags & EF_ARM_NEW_ABI)
12067 fprintf (file, _(" [new ABI]"));
12068
12069 if (flags & EF_ARM_OLD_ABI)
12070 fprintf (file, _(" [old ABI]"));
12071
12072 if (flags & EF_ARM_SOFT_FLOAT)
12073 fprintf (file, _(" [software FP]"));
12074
12075 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12076 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12077 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12078 | EF_ARM_MAVERICK_FLOAT);
12079 break;
12080
12081 case EF_ARM_EABI_VER1:
12082 fprintf (file, _(" [Version1 EABI]"));
12083
12084 if (flags & EF_ARM_SYMSARESORTED)
12085 fprintf (file, _(" [sorted symbol table]"));
12086 else
12087 fprintf (file, _(" [unsorted symbol table]"));
12088
12089 flags &= ~ EF_ARM_SYMSARESORTED;
12090 break;
12091
12092 case EF_ARM_EABI_VER2:
12093 fprintf (file, _(" [Version2 EABI]"));
12094
12095 if (flags & EF_ARM_SYMSARESORTED)
12096 fprintf (file, _(" [sorted symbol table]"));
12097 else
12098 fprintf (file, _(" [unsorted symbol table]"));
12099
12100 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12101 fprintf (file, _(" [dynamic symbols use segment index]"));
12102
12103 if (flags & EF_ARM_MAPSYMSFIRST)
12104 fprintf (file, _(" [mapping symbols precede others]"));
12105
12106 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12107 | EF_ARM_MAPSYMSFIRST);
12108 break;
12109
12110 case EF_ARM_EABI_VER3:
12111 fprintf (file, _(" [Version3 EABI]"));
12112 break;
12113
12114 case EF_ARM_EABI_VER4:
12115 fprintf (file, _(" [Version4 EABI]"));
12116 goto eabi;
12117
12118 case EF_ARM_EABI_VER5:
12119 fprintf (file, _(" [Version5 EABI]"));
12120
12121 if (flags & EF_ARM_ABI_FLOAT_SOFT)
12122 fprintf (file, _(" [soft-float ABI]"));
12123
12124 if (flags & EF_ARM_ABI_FLOAT_HARD)
12125 fprintf (file, _(" [hard-float ABI]"));
12126
12127 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
12128
12129 eabi:
12130 if (flags & EF_ARM_BE8)
12131 fprintf (file, _(" [BE8]"));
12132
12133 if (flags & EF_ARM_LE8)
12134 fprintf (file, _(" [LE8]"));
12135
12136 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12137 break;
12138
12139 default:
12140 fprintf (file, _(" <EABI version unrecognised>"));
12141 break;
12142 }
12143
12144 flags &= ~ EF_ARM_EABIMASK;
12145
12146 if (flags & EF_ARM_RELEXEC)
12147 fprintf (file, _(" [relocatable executable]"));
12148
12149 if (flags & EF_ARM_HASENTRY)
12150 fprintf (file, _(" [has entry point]"));
12151
12152 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12153
12154 if (flags)
12155 fprintf (file, _("<Unrecognised flag bits set>"));
12156
12157 fputc ('\n', file);
12158
12159 return TRUE;
12160 }
12161
12162 static int
12163 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12164 {
12165 switch (ELF_ST_TYPE (elf_sym->st_info))
12166 {
12167 case STT_ARM_TFUNC:
12168 return ELF_ST_TYPE (elf_sym->st_info);
12169
12170 case STT_ARM_16BIT:
12171 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12172 This allows us to distinguish between data used by Thumb instructions
12173 and non-data (which is probably code) inside Thumb regions of an
12174 executable. */
12175 if (type != STT_OBJECT && type != STT_TLS)
12176 return ELF_ST_TYPE (elf_sym->st_info);
12177 break;
12178
12179 default:
12180 break;
12181 }
12182
12183 return type;
12184 }
12185
12186 static asection *
12187 elf32_arm_gc_mark_hook (asection *sec,
12188 struct bfd_link_info *info,
12189 Elf_Internal_Rela *rel,
12190 struct elf_link_hash_entry *h,
12191 Elf_Internal_Sym *sym)
12192 {
12193 if (h != NULL)
12194 switch (ELF32_R_TYPE (rel->r_info))
12195 {
12196 case R_ARM_GNU_VTINHERIT:
12197 case R_ARM_GNU_VTENTRY:
12198 return NULL;
12199 }
12200
12201 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12202 }
12203
12204 /* Update the got entry reference counts for the section being removed. */
12205
12206 static bfd_boolean
12207 elf32_arm_gc_sweep_hook (bfd * abfd,
12208 struct bfd_link_info * info,
12209 asection * sec,
12210 const Elf_Internal_Rela * relocs)
12211 {
12212 Elf_Internal_Shdr *symtab_hdr;
12213 struct elf_link_hash_entry **sym_hashes;
12214 bfd_signed_vma *local_got_refcounts;
12215 const Elf_Internal_Rela *rel, *relend;
12216 struct elf32_arm_link_hash_table * globals;
12217
12218 if (info->relocatable)
12219 return TRUE;
12220
12221 globals = elf32_arm_hash_table (info);
12222 if (globals == NULL)
12223 return FALSE;
12224
12225 elf_section_data (sec)->local_dynrel = NULL;
12226
12227 symtab_hdr = & elf_symtab_hdr (abfd);
12228 sym_hashes = elf_sym_hashes (abfd);
12229 local_got_refcounts = elf_local_got_refcounts (abfd);
12230
12231 check_use_blx (globals);
12232
12233 relend = relocs + sec->reloc_count;
12234 for (rel = relocs; rel < relend; rel++)
12235 {
12236 unsigned long r_symndx;
12237 struct elf_link_hash_entry *h = NULL;
12238 struct elf32_arm_link_hash_entry *eh;
12239 int r_type;
12240 bfd_boolean call_reloc_p;
12241 bfd_boolean may_become_dynamic_p;
12242 bfd_boolean may_need_local_target_p;
12243 union gotplt_union *root_plt;
12244 struct arm_plt_info *arm_plt;
12245
12246 r_symndx = ELF32_R_SYM (rel->r_info);
12247 if (r_symndx >= symtab_hdr->sh_info)
12248 {
12249 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12250 while (h->root.type == bfd_link_hash_indirect
12251 || h->root.type == bfd_link_hash_warning)
12252 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12253 }
12254 eh = (struct elf32_arm_link_hash_entry *) h;
12255
12256 call_reloc_p = FALSE;
12257 may_become_dynamic_p = FALSE;
12258 may_need_local_target_p = FALSE;
12259
12260 r_type = ELF32_R_TYPE (rel->r_info);
12261 r_type = arm_real_reloc_type (globals, r_type);
12262 switch (r_type)
12263 {
12264 case R_ARM_GOT32:
12265 case R_ARM_GOT_PREL:
12266 case R_ARM_TLS_GD32:
12267 case R_ARM_TLS_IE32:
12268 if (h != NULL)
12269 {
12270 if (h->got.refcount > 0)
12271 h->got.refcount -= 1;
12272 }
12273 else if (local_got_refcounts != NULL)
12274 {
12275 if (local_got_refcounts[r_symndx] > 0)
12276 local_got_refcounts[r_symndx] -= 1;
12277 }
12278 break;
12279
12280 case R_ARM_TLS_LDM32:
12281 globals->tls_ldm_got.refcount -= 1;
12282 break;
12283
12284 case R_ARM_PC24:
12285 case R_ARM_PLT32:
12286 case R_ARM_CALL:
12287 case R_ARM_JUMP24:
12288 case R_ARM_PREL31:
12289 case R_ARM_THM_CALL:
12290 case R_ARM_THM_JUMP24:
12291 case R_ARM_THM_JUMP19:
12292 call_reloc_p = TRUE;
12293 may_need_local_target_p = TRUE;
12294 break;
12295
12296 case R_ARM_ABS12:
12297 if (!globals->vxworks_p)
12298 {
12299 may_need_local_target_p = TRUE;
12300 break;
12301 }
12302 /* Fall through. */
12303 case R_ARM_ABS32:
12304 case R_ARM_ABS32_NOI:
12305 case R_ARM_REL32:
12306 case R_ARM_REL32_NOI:
12307 case R_ARM_MOVW_ABS_NC:
12308 case R_ARM_MOVT_ABS:
12309 case R_ARM_MOVW_PREL_NC:
12310 case R_ARM_MOVT_PREL:
12311 case R_ARM_THM_MOVW_ABS_NC:
12312 case R_ARM_THM_MOVT_ABS:
12313 case R_ARM_THM_MOVW_PREL_NC:
12314 case R_ARM_THM_MOVT_PREL:
12315 /* Should the interworking branches be here also? */
12316 if ((info->shared || globals->root.is_relocatable_executable)
12317 && (sec->flags & SEC_ALLOC) != 0)
12318 {
12319 if (h == NULL
12320 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12321 {
12322 call_reloc_p = TRUE;
12323 may_need_local_target_p = TRUE;
12324 }
12325 else
12326 may_become_dynamic_p = TRUE;
12327 }
12328 else
12329 may_need_local_target_p = TRUE;
12330 break;
12331
12332 default:
12333 break;
12334 }
12335
12336 if (may_need_local_target_p
12337 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12338 {
12339 /* If PLT refcount book-keeping is wrong and too low, we'll
12340 see a zero value (going to -1) for the root PLT reference
12341 count. */
12342 if (root_plt->refcount >= 0)
12343 {
12344 BFD_ASSERT (root_plt->refcount != 0);
12345 root_plt->refcount -= 1;
12346 }
12347 else
12348 	    /* A value of -1 means the symbol has become local, either
12349 	       because it was forced local or because a hidden definition
12350 	       was seen.  Any other negative value is an error.  */
12351 BFD_ASSERT (root_plt->refcount == -1);
12352
12353 if (!call_reloc_p)
12354 arm_plt->noncall_refcount--;
12355
12356 if (r_type == R_ARM_THM_CALL)
12357 arm_plt->maybe_thumb_refcount--;
12358
12359 if (r_type == R_ARM_THM_JUMP24
12360 || r_type == R_ARM_THM_JUMP19)
12361 arm_plt->thumb_refcount--;
12362 }
12363
12364 if (may_become_dynamic_p)
12365 {
12366 struct elf_dyn_relocs **pp;
12367 struct elf_dyn_relocs *p;
12368
12369 if (h != NULL)
12370 pp = &(eh->dyn_relocs);
12371 else
12372 {
12373 Elf_Internal_Sym *isym;
12374
12375 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12376 abfd, r_symndx);
12377 if (isym == NULL)
12378 return FALSE;
12379 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12380 if (pp == NULL)
12381 return FALSE;
12382 }
12383 for (; (p = *pp) != NULL; pp = &p->next)
12384 if (p->sec == sec)
12385 {
12386 /* Everything must go for SEC. */
12387 *pp = p->next;
12388 break;
12389 }
12390 }
12391 }
12392
12393 return TRUE;
12394 }
12395
12396 /* Look through the relocs for a section during the first phase. */
12397
12398 static bfd_boolean
12399 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12400 asection *sec, const Elf_Internal_Rela *relocs)
12401 {
12402 Elf_Internal_Shdr *symtab_hdr;
12403 struct elf_link_hash_entry **sym_hashes;
12404 const Elf_Internal_Rela *rel;
12405 const Elf_Internal_Rela *rel_end;
12406 bfd *dynobj;
12407 asection *sreloc;
12408 struct elf32_arm_link_hash_table *htab;
12409 bfd_boolean call_reloc_p;
12410 bfd_boolean may_become_dynamic_p;
12411 bfd_boolean may_need_local_target_p;
12412 unsigned long nsyms;
12413
12414 if (info->relocatable)
12415 return TRUE;
12416
12417 BFD_ASSERT (is_arm_elf (abfd));
12418
12419 htab = elf32_arm_hash_table (info);
12420 if (htab == NULL)
12421 return FALSE;
12422
12423 sreloc = NULL;
12424
12425 /* Create dynamic sections for relocatable executables so that we can
12426 copy relocations. */
12427 if (htab->root.is_relocatable_executable
12428 && ! htab->root.dynamic_sections_created)
12429 {
12430 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12431 return FALSE;
12432 }
12433
12434 if (htab->root.dynobj == NULL)
12435 htab->root.dynobj = abfd;
12436 if (!create_ifunc_sections (info))
12437 return FALSE;
12438
12439 dynobj = htab->root.dynobj;
12440
12441 symtab_hdr = & elf_symtab_hdr (abfd);
12442 sym_hashes = elf_sym_hashes (abfd);
12443 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12444
12445 rel_end = relocs + sec->reloc_count;
12446 for (rel = relocs; rel < rel_end; rel++)
12447 {
12448 Elf_Internal_Sym *isym;
12449 struct elf_link_hash_entry *h;
12450 struct elf32_arm_link_hash_entry *eh;
12451 unsigned long r_symndx;
12452 int r_type;
12453
12454 r_symndx = ELF32_R_SYM (rel->r_info);
12455 r_type = ELF32_R_TYPE (rel->r_info);
12456 r_type = arm_real_reloc_type (htab, r_type);
12457
12458 if (r_symndx >= nsyms
12459 /* PR 9934: It is possible to have relocations that do not
12460 refer to symbols, thus it is also possible to have an
12461 object file containing relocations but no symbol table. */
12462 && (r_symndx > STN_UNDEF || nsyms > 0))
12463 {
12464 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12465 r_symndx);
12466 return FALSE;
12467 }
12468
12469 h = NULL;
12470 isym = NULL;
12471 if (nsyms > 0)
12472 {
12473 if (r_symndx < symtab_hdr->sh_info)
12474 {
12475 /* A local symbol. */
12476 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12477 abfd, r_symndx);
12478 if (isym == NULL)
12479 return FALSE;
12480 }
12481 else
12482 {
12483 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12484 while (h->root.type == bfd_link_hash_indirect
12485 || h->root.type == bfd_link_hash_warning)
12486 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12487 }
12488 }
12489
12490 eh = (struct elf32_arm_link_hash_entry *) h;
12491
12492 call_reloc_p = FALSE;
12493 may_become_dynamic_p = FALSE;
12494 may_need_local_target_p = FALSE;
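      /* CALL_RELOC_P means the reloc branches to its target and so may
	 need a PLT entry or interworking stub; MAY_NEED_LOCAL_TARGET_P
	 means the reloc refers directly to its target (not via the GOT),
	 so a copy reloc or local PLT-type entry may be required;
	 MAY_BECOME_DYNAMIC_P means the reloc may have to be copied into
	 the output as a dynamic relocation.  */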
12495
12496 /* Could be done earlier, if h were already available. */
12497 r_type = elf32_arm_tls_transition (info, r_type, h);
12498 switch (r_type)
12499 {
12500 case R_ARM_GOT32:
12501 case R_ARM_GOT_PREL:
12502 case R_ARM_TLS_GD32:
12503 case R_ARM_TLS_IE32:
12504 case R_ARM_TLS_GOTDESC:
12505 case R_ARM_TLS_DESCSEQ:
12506 case R_ARM_THM_TLS_DESCSEQ:
12507 case R_ARM_TLS_CALL:
12508 case R_ARM_THM_TLS_CALL:
12509 /* This symbol requires a global offset table entry. */
12510 {
12511 int tls_type, old_tls_type;
12512
12513 switch (r_type)
12514 {
12515 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12516
12517 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12518
12519 case R_ARM_TLS_GOTDESC:
12520 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12521 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12522 tls_type = GOT_TLS_GDESC; break;
12523
12524 default: tls_type = GOT_NORMAL; break;
12525 }
12526
12527 if (h != NULL)
12528 {
12529 h->got.refcount++;
12530 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12531 }
12532 else
12533 {
12534 /* This is a global offset table entry for a local symbol. */
12535 if (!elf32_arm_allocate_local_sym_info (abfd))
12536 return FALSE;
12537 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12538 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12539 }
12540
12541 	    /* If a variable is accessed with both the general-dynamic and
12542 	       descriptor TLS methods, two GOT slots may be created.  */
12543 if (GOT_TLS_GD_ANY_P (old_tls_type)
12544 && GOT_TLS_GD_ANY_P (tls_type))
12545 tls_type |= old_tls_type;
12546
12547 /* We will already have issued an error message if there
12548 is a TLS/non-TLS mismatch, based on the symbol
12549 type. So just combine any TLS types needed. */
12550 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12551 && tls_type != GOT_NORMAL)
12552 tls_type |= old_tls_type;
12553
12554 	    /* If the symbol is accessed via both the IE and GDESC
12555 	       methods, we can relax the GDESC accesses to IE.  Turn off
12556 	       the GDESC flag without disturbing any other TLS types
12557 	       that may be involved.  */
12558 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12559 tls_type &= ~GOT_TLS_GDESC;
12560
12561 if (old_tls_type != tls_type)
12562 {
12563 if (h != NULL)
12564 elf32_arm_hash_entry (h)->tls_type = tls_type;
12565 else
12566 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12567 }
12568 }
12569 /* Fall through. */
12570
12571 case R_ARM_TLS_LDM32:
12572 if (r_type == R_ARM_TLS_LDM32)
12573 htab->tls_ldm_got.refcount++;
12574 /* Fall through. */
12575
12576 case R_ARM_GOTOFF32:
12577 case R_ARM_GOTPC:
12578 if (htab->root.sgot == NULL
12579 && !create_got_section (htab->root.dynobj, info))
12580 return FALSE;
12581 break;
12582
12583 case R_ARM_PC24:
12584 case R_ARM_PLT32:
12585 case R_ARM_CALL:
12586 case R_ARM_JUMP24:
12587 case R_ARM_PREL31:
12588 case R_ARM_THM_CALL:
12589 case R_ARM_THM_JUMP24:
12590 case R_ARM_THM_JUMP19:
12591 call_reloc_p = TRUE;
12592 may_need_local_target_p = TRUE;
12593 break;
12594
12595 case R_ARM_ABS12:
12596 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12597 ldr __GOTT_INDEX__ offsets. */
12598 if (!htab->vxworks_p)
12599 {
12600 may_need_local_target_p = TRUE;
12601 break;
12602 }
12603 /* Fall through. */
12604
12605 case R_ARM_MOVW_ABS_NC:
12606 case R_ARM_MOVT_ABS:
12607 case R_ARM_THM_MOVW_ABS_NC:
12608 case R_ARM_THM_MOVT_ABS:
12609 if (info->shared)
12610 {
12611 (*_bfd_error_handler)
12612 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12613 abfd, elf32_arm_howto_table_1[r_type].name,
12614 (h) ? h->root.root.string : "a local symbol");
12615 bfd_set_error (bfd_error_bad_value);
12616 return FALSE;
12617 }
12618
12619 /* Fall through. */
12620 case R_ARM_ABS32:
12621 case R_ARM_ABS32_NOI:
12622 case R_ARM_REL32:
12623 case R_ARM_REL32_NOI:
12624 case R_ARM_MOVW_PREL_NC:
12625 case R_ARM_MOVT_PREL:
12626 case R_ARM_THM_MOVW_PREL_NC:
12627 case R_ARM_THM_MOVT_PREL:
12628
12629 /* Should the interworking branches be listed here? */
12630 if ((info->shared || htab->root.is_relocatable_executable)
12631 && (sec->flags & SEC_ALLOC) != 0)
12632 {
12633 if (h == NULL
12634 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12635 {
12636 /* In shared libraries and relocatable executables,
12637 we treat local relative references as calls;
12638 see the related SYMBOL_CALLS_LOCAL code in
12639 allocate_dynrelocs. */
12640 call_reloc_p = TRUE;
12641 may_need_local_target_p = TRUE;
12642 }
12643 else
12644 /* We are creating a shared library or relocatable
12645 executable, and this is a reloc against a global symbol,
12646 or a non-PC-relative reloc against a local symbol.
12647 We may need to copy the reloc into the output. */
12648 may_become_dynamic_p = TRUE;
12649 }
12650 else
12651 may_need_local_target_p = TRUE;
12652 break;
12653
12654 /* This relocation describes the C++ object vtable hierarchy.
12655 Reconstruct it for later use during GC. */
12656 case R_ARM_GNU_VTINHERIT:
12657 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12658 return FALSE;
12659 break;
12660
12661 /* This relocation describes which C++ vtable entries are actually
12662 used. Record for later use during GC. */
12663 case R_ARM_GNU_VTENTRY:
12664 BFD_ASSERT (h != NULL);
12665 if (h != NULL
12666 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12667 return FALSE;
12668 break;
12669 }
12670
12671 if (h != NULL)
12672 {
12673 if (call_reloc_p)
12674 /* We may need a .plt entry if the function this reloc
12675 refers to is in a different object, regardless of the
12676 symbol's type. We can't tell for sure yet, because
12677 something later might force the symbol local. */
12678 h->needs_plt = 1;
12679 else if (may_need_local_target_p)
12680 /* If this reloc is in a read-only section, we might
12681 need a copy reloc. We can't check reliably at this
12682 stage whether the section is read-only, as input
12683 sections have not yet been mapped to output sections.
12684 Tentatively set the flag for now, and correct in
12685 adjust_dynamic_symbol. */
12686 h->non_got_ref = 1;
12687 }
12688
12689 if (may_need_local_target_p
12690 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12691 {
12692 union gotplt_union *root_plt;
12693 struct arm_plt_info *arm_plt;
12694 struct arm_local_iplt_info *local_iplt;
12695
12696 if (h != NULL)
12697 {
12698 root_plt = &h->plt;
12699 arm_plt = &eh->plt;
12700 }
12701 else
12702 {
12703 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12704 if (local_iplt == NULL)
12705 return FALSE;
12706 root_plt = &local_iplt->root;
12707 arm_plt = &local_iplt->arm;
12708 }
12709
12710 /* If the symbol is a function that doesn't bind locally,
12711 this relocation will need a PLT entry. */
12712 if (root_plt->refcount != -1)
12713 root_plt->refcount += 1;
12714
12715 if (!call_reloc_p)
12716 arm_plt->noncall_refcount++;
12717
12718 /* It's too early to use htab->use_blx here, so we have to
12719 record possible blx references separately from
12720 relocs that definitely need a thumb stub. */
12721
12722 if (r_type == R_ARM_THM_CALL)
12723 arm_plt->maybe_thumb_refcount += 1;
12724
12725 if (r_type == R_ARM_THM_JUMP24
12726 || r_type == R_ARM_THM_JUMP19)
12727 arm_plt->thumb_refcount += 1;
12728 }
12729
12730 if (may_become_dynamic_p)
12731 {
12732 struct elf_dyn_relocs *p, **head;
12733
12734 /* Create a reloc section in dynobj. */
12735 if (sreloc == NULL)
12736 {
12737 sreloc = _bfd_elf_make_dynamic_reloc_section
12738 (sec, dynobj, 2, abfd, ! htab->use_rel);
12739
12740 if (sreloc == NULL)
12741 return FALSE;
12742
12743 /* BPABI objects never have dynamic relocations mapped. */
12744 if (htab->symbian_p)
12745 {
12746 flagword flags;
12747
12748 flags = bfd_get_section_flags (dynobj, sreloc);
12749 flags &= ~(SEC_LOAD | SEC_ALLOC);
12750 bfd_set_section_flags (dynobj, sreloc, flags);
12751 }
12752 }
12753
12754 /* If this is a global symbol, count the number of
12755 relocations we need for this symbol. */
12756 if (h != NULL)
12757 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12758 else
12759 {
12760 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12761 if (head == NULL)
12762 return FALSE;
12763 }
12764
12765 p = *head;
12766 if (p == NULL || p->sec != sec)
12767 {
12768 bfd_size_type amt = sizeof *p;
12769
12770 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12771 if (p == NULL)
12772 return FALSE;
12773 p->next = *head;
12774 *head = p;
12775 p->sec = sec;
12776 p->count = 0;
12777 p->pc_count = 0;
12778 }
12779
12780 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12781 p->pc_count += 1;
12782 p->count += 1;
12783 }
12784 }
12785
12786 return TRUE;
12787 }
12788
12789 /* Unwinding tables are not referenced directly. This pass marks them as
12790 required if the corresponding code section is marked. */
12791
12792 static bfd_boolean
12793 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12794 elf_gc_mark_hook_fn gc_mark_hook)
12795 {
12796 bfd *sub;
12797 Elf_Internal_Shdr **elf_shdrp;
12798 bfd_boolean again;
12799
12800 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12801
12802 /* Marking EH data may cause additional code sections to be marked,
12803 requiring multiple passes. */
12804 again = TRUE;
12805 while (again)
12806 {
12807 again = FALSE;
12808 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12809 {
12810 asection *o;
12811
12812 if (! is_arm_elf (sub))
12813 continue;
12814
12815 elf_shdrp = elf_elfsections (sub);
12816 for (o = sub->sections; o != NULL; o = o->next)
12817 {
12818 Elf_Internal_Shdr *hdr;
12819
12820 hdr = &elf_section_data (o)->this_hdr;
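	      /* An SHT_ARM_EXIDX section's sh_link holds the section
		 header index of the text section it unwinds, so mark the
		 EXIDX section whenever that text section has been
		 marked.  */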
12821 if (hdr->sh_type == SHT_ARM_EXIDX
12822 && hdr->sh_link
12823 && hdr->sh_link < elf_numsections (sub)
12824 && !o->gc_mark
12825 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12826 {
12827 again = TRUE;
12828 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12829 return FALSE;
12830 }
12831 }
12832 }
12833 }
12834
12835 return TRUE;
12836 }
12837
12838 /* Treat mapping symbols as special target symbols. */
12839
12840 static bfd_boolean
12841 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12842 {
12843 return bfd_is_arm_special_symbol_name (sym->name,
12844 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12845 }
12846
12847 /* This is a copy of elf_find_function() from elf.c except that
12848 ARM mapping symbols are ignored when looking for function names
12849    and STT_ARM_TFUNC is considered to be a function type.  */
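/* Mapping symbols ($a, $t, $d) mark instruction-set and data regions
   rather than functions, so they must never be reported as the nearest
   function name.  */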
12850
12851 static bfd_boolean
12852 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12853 asection * section,
12854 asymbol ** symbols,
12855 bfd_vma offset,
12856 const char ** filename_ptr,
12857 const char ** functionname_ptr)
12858 {
12859 const char * filename = NULL;
12860 asymbol * func = NULL;
12861 bfd_vma low_func = 0;
12862 asymbol ** p;
12863
12864 for (p = symbols; *p != NULL; p++)
12865 {
12866 elf_symbol_type *q;
12867
12868 q = (elf_symbol_type *) *p;
12869
12870 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12871 {
12872 default:
12873 break;
12874 case STT_FILE:
12875 filename = bfd_asymbol_name (&q->symbol);
12876 break;
12877 case STT_FUNC:
12878 case STT_ARM_TFUNC:
12879 case STT_NOTYPE:
12880 /* Skip mapping symbols. */
12881 if ((q->symbol.flags & BSF_LOCAL)
12882 && bfd_is_arm_special_symbol_name (q->symbol.name,
12883 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12884 continue;
12885 /* Fall through. */
12886 if (bfd_get_section (&q->symbol) == section
12887 && q->symbol.value >= low_func
12888 && q->symbol.value <= offset)
12889 {
12890 func = (asymbol *) q;
12891 low_func = q->symbol.value;
12892 }
12893 break;
12894 }
12895 }
12896
12897 if (func == NULL)
12898 return FALSE;
12899
12900 if (filename_ptr)
12901 *filename_ptr = filename;
12902 if (functionname_ptr)
12903 *functionname_ptr = bfd_asymbol_name (func);
12904
12905 return TRUE;
12906 }
12907
12908
12909 /* Find the nearest line to a particular section and offset, for error
12910 reporting. This code is a duplicate of the code in elf.c, except
12911 that it uses arm_elf_find_function. */
12912
12913 static bfd_boolean
12914 elf32_arm_find_nearest_line (bfd * abfd,
12915 asection * section,
12916 asymbol ** symbols,
12917 bfd_vma offset,
12918 const char ** filename_ptr,
12919 const char ** functionname_ptr,
12920 unsigned int * line_ptr)
12921 {
12922 bfd_boolean found = FALSE;
12923
12924 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12925
12926 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
12927 section, symbols, offset,
12928 filename_ptr, functionname_ptr,
12929 line_ptr, NULL, 0,
12930 & elf_tdata (abfd)->dwarf2_find_line_info))
12931 {
12932 if (!*functionname_ptr)
12933 arm_elf_find_function (abfd, section, symbols, offset,
12934 *filename_ptr ? NULL : filename_ptr,
12935 functionname_ptr);
12936
12937 return TRUE;
12938 }
12939
12940 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12941 & found, filename_ptr,
12942 functionname_ptr, line_ptr,
12943 & elf_tdata (abfd)->line_info))
12944 return FALSE;
12945
12946 if (found && (*functionname_ptr || *line_ptr))
12947 return TRUE;
12948
12949 if (symbols == NULL)
12950 return FALSE;
12951
12952 if (! arm_elf_find_function (abfd, section, symbols, offset,
12953 filename_ptr, functionname_ptr))
12954 return FALSE;
12955
12956 *line_ptr = 0;
12957 return TRUE;
12958 }
12959
12960 static bfd_boolean
12961 elf32_arm_find_inliner_info (bfd * abfd,
12962 const char ** filename_ptr,
12963 const char ** functionname_ptr,
12964 unsigned int * line_ptr)
12965 {
12966 bfd_boolean found;
12967 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12968 functionname_ptr, line_ptr,
12969 & elf_tdata (abfd)->dwarf2_find_line_info);
12970 return found;
12971 }
12972
12973 /* Adjust a symbol defined by a dynamic object and referenced by a
12974 regular object. The current definition is in some section of the
12975 dynamic object, but we're not including those sections. We have to
12976 change the definition to something the rest of the link can
12977 understand. */
12978
12979 static bfd_boolean
12980 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12981 struct elf_link_hash_entry * h)
12982 {
12983 bfd * dynobj;
12984 asection * s;
12985 struct elf32_arm_link_hash_entry * eh;
12986 struct elf32_arm_link_hash_table *globals;
12987
12988 globals = elf32_arm_hash_table (info);
12989 if (globals == NULL)
12990 return FALSE;
12991
12992 dynobj = elf_hash_table (info)->dynobj;
12993
12994 /* Make sure we know what is going on here. */
12995 BFD_ASSERT (dynobj != NULL
12996 && (h->needs_plt
12997 || h->type == STT_GNU_IFUNC
12998 || h->u.weakdef != NULL
12999 || (h->def_dynamic
13000 && h->ref_regular
13001 && !h->def_regular)));
13002
13003 eh = (struct elf32_arm_link_hash_entry *) h;
13004
13005 /* If this is a function, put it in the procedure linkage table. We
13006 will fill in the contents of the procedure linkage table later,
13007 when we know the address of the .got section. */
13008 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
13009 {
13010 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13011 symbol binds locally. */
13012 if (h->plt.refcount <= 0
13013 || (h->type != STT_GNU_IFUNC
13014 && (SYMBOL_CALLS_LOCAL (info, h)
13015 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
13016 && h->root.type == bfd_link_hash_undefweak))))
13017 {
13018 /* This case can occur if we saw a PLT32 reloc in an input
13019 file, but the symbol was never referred to by a dynamic
13020 object, or if all references were garbage collected. In
13021 such a case, we don't actually need to build a procedure
13022 linkage table, and we can just do a PC24 reloc instead. */
13023 h->plt.offset = (bfd_vma) -1;
13024 eh->plt.thumb_refcount = 0;
13025 eh->plt.maybe_thumb_refcount = 0;
13026 eh->plt.noncall_refcount = 0;
13027 h->needs_plt = 0;
13028 }
13029
13030 return TRUE;
13031 }
13032 else
13033 {
13034 /* It's possible that we incorrectly decided a .plt reloc was
13035 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13036 in check_relocs. We can't decide accurately between function
13037 and non-function syms in check_relocs; objects loaded later in
13038 the link may change h->type. So fix it now. */
13039 h->plt.offset = (bfd_vma) -1;
13040 eh->plt.thumb_refcount = 0;
13041 eh->plt.maybe_thumb_refcount = 0;
13042 eh->plt.noncall_refcount = 0;
13043 }
13044
13045 /* If this is a weak symbol, and there is a real definition, the
13046 processor independent code will have arranged for us to see the
13047 real definition first, and we can just use the same value. */
13048 if (h->u.weakdef != NULL)
13049 {
13050 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13051 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13052 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13053 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13054 return TRUE;
13055 }
13056
13057 /* If there are no non-GOT references, we do not need a copy
13058 relocation. */
13059 if (!h->non_got_ref)
13060 return TRUE;
13061
13062 /* This is a reference to a symbol defined by a dynamic object which
13063 is not a function. */
13064
13065 /* If we are creating a shared library, we must presume that the
13066 only references to the symbol are via the global offset table.
13067 For such cases we need not do anything here; the relocations will
13068 be handled correctly by relocate_section. Relocatable executables
13069 can reference data in shared objects directly, so we don't need to
13070 do anything here. */
13071 if (info->shared || globals->root.is_relocatable_executable)
13072 return TRUE;
13073
13074 /* We must allocate the symbol in our .dynbss section, which will
13075 become part of the .bss section of the executable. There will be
13076 an entry for this symbol in the .dynsym section. The dynamic
13077 object will contain position independent code, so all references
13078 from the dynamic object to this symbol will go through the global
13079 offset table. The dynamic linker will use the .dynsym entry to
13080 determine the address it must put in the global offset table, so
13081 both the dynamic object and the regular object will refer to the
13082 same memory location for the variable. */
13083 s = bfd_get_linker_section (dynobj, ".dynbss");
13084 BFD_ASSERT (s != NULL);
13085
13086 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13087 copy the initial value out of the dynamic object and into the
13088 runtime process image. We need to remember the offset into the
13089 .rel(a).bss section we are going to use. */
13090 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13091 {
13092 asection *srel;
13093
13094 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13095 elf32_arm_allocate_dynrelocs (info, srel, 1);
13096 h->needs_copy = 1;
13097 }
13098
13099 return _bfd_elf_adjust_dynamic_copy (h, s);
13100 }
13101
13102 /* Allocate space in .plt, .got and associated reloc sections for
13103 dynamic relocs. */
13104
13105 static bfd_boolean
13106 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13107 {
13108 struct bfd_link_info *info;
13109 struct elf32_arm_link_hash_table *htab;
13110 struct elf32_arm_link_hash_entry *eh;
13111 struct elf_dyn_relocs *p;
13112
13113 if (h->root.type == bfd_link_hash_indirect)
13114 return TRUE;
13115
13116 eh = (struct elf32_arm_link_hash_entry *) h;
13117
13118 info = (struct bfd_link_info *) inf;
13119 htab = elf32_arm_hash_table (info);
13120 if (htab == NULL)
13121 return FALSE;
13122
13123 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13124 && h->plt.refcount > 0)
13125 {
13126 /* Make sure this symbol is output as a dynamic symbol.
13127 Undefined weak syms won't yet be marked as dynamic. */
13128 if (h->dynindx == -1
13129 && !h->forced_local)
13130 {
13131 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13132 return FALSE;
13133 }
13134
13135 /* If the call in the PLT entry binds locally, the associated
13136 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13137 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13138 than the .plt section. */
13139 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13140 {
13141 eh->is_iplt = 1;
13142 if (eh->plt.noncall_refcount == 0
13143 && SYMBOL_REFERENCES_LOCAL (info, h))
13144 /* All non-call references can be resolved directly.
13145 This means that they can (and in some cases, must)
13146 resolve directly to the run-time target, rather than
13147 to the PLT. That in turn means that any .got entry
13148 would be equal to the .igot.plt entry, so there's
13149 no point having both. */
13150 h->got.refcount = 0;
13151 }
13152
13153 if (info->shared
13154 || eh->is_iplt
13155 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13156 {
13157 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13158
13159 /* If this symbol is not defined in a regular file, and we are
13160 not generating a shared library, then set the symbol to this
13161 location in the .plt. This is required to make function
13162 pointers compare as equal between the normal executable and
13163 the shared library. */
13164 if (! info->shared
13165 && !h->def_regular)
13166 {
13167 h->root.u.def.section = htab->root.splt;
13168 h->root.u.def.value = h->plt.offset;
13169
13170 /* Make sure the function is not marked as Thumb, in case
13171 it is the target of an ABS32 relocation, which will
13172 point to the PLT entry. */
13173 h->target_internal = ST_BRANCH_TO_ARM;
13174 }
13175
13176 htab->next_tls_desc_index++;
13177
13178 /* VxWorks executables have a second set of relocations for
13179 each PLT entry. They go in a separate relocation section,
13180 which is processed by the kernel loader. */
13181 if (htab->vxworks_p && !info->shared)
13182 {
13183 /* There is a relocation for the initial PLT entry:
13184 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13185 if (h->plt.offset == htab->plt_header_size)
13186 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13187
13188 /* There are two extra relocations for each subsequent
13189 PLT entry: an R_ARM_32 relocation for the GOT entry,
13190 and an R_ARM_32 relocation for the PLT entry. */
13191 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13192 }
13193 }
13194 else
13195 {
13196 h->plt.offset = (bfd_vma) -1;
13197 h->needs_plt = 0;
13198 }
13199 }
13200 else
13201 {
13202 h->plt.offset = (bfd_vma) -1;
13203 h->needs_plt = 0;
13204 }
13205
13206 eh = (struct elf32_arm_link_hash_entry *) h;
13207 eh->tlsdesc_got = (bfd_vma) -1;
13208
13209 if (h->got.refcount > 0)
13210 {
13211 asection *s;
13212 bfd_boolean dyn;
13213 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13214 int indx;
13215
13216 /* Make sure this symbol is output as a dynamic symbol.
13217 Undefined weak syms won't yet be marked as dynamic. */
13218 if (h->dynindx == -1
13219 && !h->forced_local)
13220 {
13221 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13222 return FALSE;
13223 }
13224
13225 if (!htab->symbian_p)
13226 {
13227 s = htab->root.sgot;
13228 h->got.offset = s->size;
13229
13230 if (tls_type == GOT_UNKNOWN)
13231 abort ();
13232
13233 if (tls_type == GOT_NORMAL)
13234 /* Non-TLS symbols need one GOT slot. */
13235 s->size += 4;
13236 else
13237 {
13238 if (tls_type & GOT_TLS_GDESC)
13239 {
13240 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13241 eh->tlsdesc_got
13242 = (htab->root.sgotplt->size
13243 - elf32_arm_compute_jump_table_size (htab));
13244 htab->root.sgotplt->size += 8;
13245 h->got.offset = (bfd_vma) -2;
13246 /* plt.got_offset needs to know there's a TLS_DESC
13247 reloc in the middle of .got.plt. */
13248 htab->num_tls_desc++;
13249 }
13250
13251 if (tls_type & GOT_TLS_GD)
13252 {
13253 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13254 the symbol is both GD and GDESC, got.offset may
13255 have been overwritten. */
13256 h->got.offset = s->size;
13257 s->size += 8;
13258 }
13259
13260 if (tls_type & GOT_TLS_IE)
13261 /* R_ARM_TLS_IE32 needs one GOT slot. */
13262 s->size += 4;
13263 }
13264
13265 dyn = htab->root.dynamic_sections_created;
13266
13267 indx = 0;
13268 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13269 && (!info->shared
13270 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13271 indx = h->dynindx;
13272
13273 if (tls_type != GOT_NORMAL
13274 && (info->shared || indx != 0)
13275 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13276 || h->root.type != bfd_link_hash_undefweak))
13277 {
13278 if (tls_type & GOT_TLS_IE)
13279 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13280
13281 if (tls_type & GOT_TLS_GD)
13282 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13283
13284 if (tls_type & GOT_TLS_GDESC)
13285 {
13286 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13287 /* GDESC needs a trampoline to jump to. */
13288 htab->tls_trampoline = -1;
13289 }
13290
13291 /* Only GD needs it. GDESC just emits one relocation per
13292 2 entries. */
13293 if ((tls_type & GOT_TLS_GD) && indx != 0)
13294 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13295 }
13296 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13297 {
13298 if (htab->root.dynamic_sections_created)
13299 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13300 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13301 }
13302 else if (h->type == STT_GNU_IFUNC
13303 && eh->plt.noncall_refcount == 0)
13304 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13305 they all resolve dynamically instead. Reserve room for the
13306 GOT entry's R_ARM_IRELATIVE relocation. */
13307 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13308 else if (info->shared)
13309 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13310 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13311 }
13312 }
13313 else
13314 h->got.offset = (bfd_vma) -1;
13315
13316 /* Allocate stubs for exported Thumb functions on v4t. */
13317 if (!htab->use_blx && h->dynindx != -1
13318 && h->def_regular
13319 && h->target_internal == ST_BRANCH_TO_THUMB
13320 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13321 {
13322 struct elf_link_hash_entry * th;
13323 struct bfd_link_hash_entry * bh;
13324 struct elf_link_hash_entry * myh;
13325 char name[1024];
13326 asection *s;
13327 bh = NULL;
13328 /* Create a new symbol to register the real location of the function. */
13329 s = h->root.u.def.section;
13330 sprintf (name, "__real_%s", h->root.root.string);
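/* Note: this assumes that "__real_" plus the symbol name fits in the
   fixed 1024-byte buffer declared above.  */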
13331 _bfd_generic_link_add_one_symbol (info, s->owner,
13332 name, BSF_GLOBAL, s,
13333 h->root.u.def.value,
13334 NULL, TRUE, FALSE, &bh);
13335
13336 myh = (struct elf_link_hash_entry *) bh;
13337 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13338 myh->forced_local = 1;
13339 myh->target_internal = ST_BRANCH_TO_THUMB;
13340 eh->export_glue = myh;
13341 th = record_arm_to_thumb_glue (info, h);
13342 /* Point the symbol at the stub. */
13343 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13344 h->target_internal = ST_BRANCH_TO_ARM;
13345 h->root.u.def.section = th->root.u.def.section;
13346 h->root.u.def.value = th->root.u.def.value & ~1;
13347 }
13348
13349 if (eh->dyn_relocs == NULL)
13350 return TRUE;
13351
13352 /* In the shared -Bsymbolic case, discard space allocated for
13353 dynamic pc-relative relocs against symbols which turn out to be
13354 defined in regular objects. For the normal shared case, discard
13355 space for pc-relative relocs that have become local due to symbol
13356 visibility changes. */
13357
13358 if (info->shared || htab->root.is_relocatable_executable)
13359 {
13360 /* The only relocs that use pc_count are R_ARM_REL32 and
13361 R_ARM_REL32_NOI, which will appear on something like
13362 ".long foo - .". We want calls to protected symbols to resolve
13363 directly to the function rather than going via the plt. If people
13364 want function pointer comparisons to work as expected then they
13365 should avoid writing assembly like ".long foo - .". */
13366 if (SYMBOL_CALLS_LOCAL (info, h))
13367 {
13368 struct elf_dyn_relocs **pp;
13369
13370 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13371 {
13372 p->count -= p->pc_count;
13373 p->pc_count = 0;
13374 if (p->count == 0)
13375 *pp = p->next;
13376 else
13377 pp = &p->next;
13378 }
13379 }
13380
13381 if (htab->vxworks_p)
13382 {
13383 struct elf_dyn_relocs **pp;
13384
13385 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13386 {
13387 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13388 *pp = p->next;
13389 else
13390 pp = &p->next;
13391 }
13392 }
13393
13394 /* Also discard relocs on undefined weak syms with non-default
13395 visibility. */
13396 if (eh->dyn_relocs != NULL
13397 && h->root.type == bfd_link_hash_undefweak)
13398 {
13399 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13400 eh->dyn_relocs = NULL;
13401
13402 /* Make sure undefined weak symbols are output as dynamic
13403 symbols in PIEs. */
13404 else if (h->dynindx == -1
13405 && !h->forced_local)
13406 {
13407 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13408 return FALSE;
13409 }
13410 }
13411
13412 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13413 && h->root.type == bfd_link_hash_new)
13414 {
13415 /* Output absolute symbols so that we can create relocations
13416 against them. For normal symbols we output a relocation
13417 against the section that contains them. */
13418 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13419 return FALSE;
13420 }
13421
13422 }
13423 else
13424 {
13425 /* For the non-shared case, discard space for relocs against
13426 symbols which turn out to need copy relocs or are not
13427 dynamic. */
13428
13429 if (!h->non_got_ref
13430 && ((h->def_dynamic
13431 && !h->def_regular)
13432 || (htab->root.dynamic_sections_created
13433 && (h->root.type == bfd_link_hash_undefweak
13434 || h->root.type == bfd_link_hash_undefined))))
13435 {
13436 /* Make sure this symbol is output as a dynamic symbol.
13437 Undefined weak syms won't yet be marked as dynamic. */
13438 if (h->dynindx == -1
13439 && !h->forced_local)
13440 {
13441 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13442 return FALSE;
13443 }
13444
13445 /* If that succeeded, we know we'll be keeping all the
13446 relocs. */
13447 if (h->dynindx != -1)
13448 goto keep;
13449 }
13450
13451 eh->dyn_relocs = NULL;
13452
13453 keep: ;
13454 }
13455
13456 /* Finally, allocate space. */
13457 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13458 {
13459 asection *sreloc = elf_section_data (p->sec)->sreloc;
13460 if (h->type == STT_GNU_IFUNC
13461 && eh->plt.noncall_refcount == 0
13462 && SYMBOL_REFERENCES_LOCAL (info, h))
13463 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13464 else
13465 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13466 }
13467
13468 return TRUE;
13469 }
13470
13471 /* Find any dynamic relocs that apply to read-only sections. */
13472
13473 static bfd_boolean
13474 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13475 {
13476 struct elf32_arm_link_hash_entry * eh;
13477 struct elf_dyn_relocs * p;
13478
13479 eh = (struct elf32_arm_link_hash_entry *) h;
13480 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13481 {
13482 asection *s = p->sec;
13483
13484 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13485 {
13486 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13487
13488 info->flags |= DF_TEXTREL;
13489
13490 /* Not an error, just cut short the traversal. */
13491 return FALSE;
13492 }
13493 }
13494 return TRUE;
13495 }
13496
13497 void
13498 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13499 int byteswap_code)
13500 {
13501 struct elf32_arm_link_hash_table *globals;
13502
13503 globals = elf32_arm_hash_table (info);
13504 if (globals == NULL)
13505 return;
13506
13507 globals->byteswap_code = byteswap_code;
13508 }
13509
13510 /* Set the sizes of the dynamic sections. */
13511
13512 static bfd_boolean
13513 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13514 struct bfd_link_info * info)
13515 {
13516 bfd * dynobj;
13517 asection * s;
13518 bfd_boolean plt;
13519 bfd_boolean relocs;
13520 bfd *ibfd;
13521 struct elf32_arm_link_hash_table *htab;
13522
13523 htab = elf32_arm_hash_table (info);
13524 if (htab == NULL)
13525 return FALSE;
13526
13527 dynobj = elf_hash_table (info)->dynobj;
13528 BFD_ASSERT (dynobj != NULL);
13529 check_use_blx (htab);
13530
13531 if (elf_hash_table (info)->dynamic_sections_created)
13532 {
13533 /* Set the contents of the .interp section to the interpreter. */
13534 if (info->executable)
13535 {
13536 s = bfd_get_linker_section (dynobj, ".interp");
13537 BFD_ASSERT (s != NULL);
13538 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13539 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13540 }
13541 }
13542
13543 /* Set up .got offsets for local syms, and space for local dynamic
13544 relocs. */
13545 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13546 {
13547 bfd_signed_vma *local_got;
13548 bfd_signed_vma *end_local_got;
13549 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13550 char *local_tls_type;
13551 bfd_vma *local_tlsdesc_gotent;
13552 bfd_size_type locsymcount;
13553 Elf_Internal_Shdr *symtab_hdr;
13554 asection *srel;
13555 bfd_boolean is_vxworks = htab->vxworks_p;
13556 unsigned int symndx;
13557
13558 if (! is_arm_elf (ibfd))
13559 continue;
13560
13561 for (s = ibfd->sections; s != NULL; s = s->next)
13562 {
13563 struct elf_dyn_relocs *p;
13564
13565 for (p = (struct elf_dyn_relocs *)
13566 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13567 {
13568 if (!bfd_is_abs_section (p->sec)
13569 && bfd_is_abs_section (p->sec->output_section))
13570 {
13571 /* Input section has been discarded, either because
13572 it is a copy of a linkonce section or due to
13573 linker script /DISCARD/, so we'll be discarding
13574 the relocs too. */
13575 }
13576 else if (is_vxworks
13577 && strcmp (p->sec->output_section->name,
13578 ".tls_vars") == 0)
13579 {
13580 /* Relocations in vxworks .tls_vars sections are
13581 handled specially by the loader. */
13582 }
13583 else if (p->count != 0)
13584 {
13585 srel = elf_section_data (p->sec)->sreloc;
13586 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13587 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13588 info->flags |= DF_TEXTREL;
13589 }
13590 }
13591 }
13592
13593 local_got = elf_local_got_refcounts (ibfd);
13594 if (!local_got)
13595 continue;
13596
13597 symtab_hdr = & elf_symtab_hdr (ibfd);
13598 locsymcount = symtab_hdr->sh_info;
13599 end_local_got = local_got + locsymcount;
13600 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13601 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13602 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13603 symndx = 0;
13604 s = htab->root.sgot;
13605 srel = htab->root.srelgot;
13606 for (; local_got < end_local_got;
13607 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13608 ++local_tlsdesc_gotent, ++symndx)
13609 {
13610 *local_tlsdesc_gotent = (bfd_vma) -1;
13611 local_iplt = *local_iplt_ptr;
13612 if (local_iplt != NULL)
13613 {
13614 struct elf_dyn_relocs *p;
13615
13616 if (local_iplt->root.refcount > 0)
13617 {
13618 elf32_arm_allocate_plt_entry (info, TRUE,
13619 &local_iplt->root,
13620 &local_iplt->arm);
13621 if (local_iplt->arm.noncall_refcount == 0)
13622 /* All references to the PLT are calls, so all
13623 non-call references can resolve directly to the
13624 run-time target. This means that the .got entry
13625 would be the same as the .igot.plt entry, so there's
13626 no point creating both. */
13627 *local_got = 0;
13628 }
13629 else
13630 {
13631 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13632 local_iplt->root.offset = (bfd_vma) -1;
13633 }
13634
13635 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13636 {
13637 asection *psrel;
13638
13639 psrel = elf_section_data (p->sec)->sreloc;
13640 if (local_iplt->arm.noncall_refcount == 0)
13641 elf32_arm_allocate_irelocs (info, psrel, p->count);
13642 else
13643 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13644 }
13645 }
13646 if (*local_got > 0)
13647 {
13648 Elf_Internal_Sym *isym;
13649
13650 *local_got = s->size;
13651 if (*local_tls_type & GOT_TLS_GD)
13652 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13653 s->size += 8;
13654 if (*local_tls_type & GOT_TLS_GDESC)
13655 {
13656 *local_tlsdesc_gotent = htab->root.sgotplt->size
13657 - elf32_arm_compute_jump_table_size (htab);
13658 htab->root.sgotplt->size += 8;
13659 *local_got = (bfd_vma) -2;
13660 /* plt.got_offset needs to know there's a TLS_DESC
13661 reloc in the middle of .got.plt. */
13662 htab->num_tls_desc++;
13663 }
13664 if (*local_tls_type & GOT_TLS_IE)
13665 s->size += 4;
13666
13667 if (*local_tls_type & GOT_NORMAL)
13668 {
13669 /* If the symbol is both GD and GDESC, *local_got
13670 may have been overwritten. */
13671 *local_got = s->size;
13672 s->size += 4;
13673 }
13674
13675 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13676 if (isym == NULL)
13677 return FALSE;
13678
13679 /* If all references to an STT_GNU_IFUNC PLT are calls,
13680 then all non-call references, including this GOT entry,
13681 resolve directly to the run-time target. */
13682 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13683 && (local_iplt == NULL
13684 || local_iplt->arm.noncall_refcount == 0))
13685 elf32_arm_allocate_irelocs (info, srel, 1);
13686 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13687 || *local_tls_type & GOT_TLS_GD)
13688 elf32_arm_allocate_dynrelocs (info, srel, 1);
13689
13690 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13691 {
13692 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13693 htab->tls_trampoline = -1;
13694 }
13695 }
13696 else
13697 *local_got = (bfd_vma) -1;
13698 }
13699 }
13700
13701 if (htab->tls_ldm_got.refcount > 0)
13702 {
13703 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13704 for R_ARM_TLS_LDM32 relocations. */
13705 htab->tls_ldm_got.offset = htab->root.sgot->size;
13706 htab->root.sgot->size += 8;
13707 if (info->shared)
13708 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13709 }
13710 else
13711 htab->tls_ldm_got.offset = -1;
13712
13713 /* Allocate global sym .plt and .got entries, and space for global
13714 sym dynamic relocs. */
13715 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13716
13717 /* Here we rummage through the found bfds to collect glue information. */
13718 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13719 {
13720 if (! is_arm_elf (ibfd))
13721 continue;
13722
13723 /* Initialise mapping tables for code/data. */
13724 bfd_elf32_arm_init_maps (ibfd);
13725
13726 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13727 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13728 /* xgettext:c-format */
13729 _bfd_error_handler (_("Errors encountered processing file %s"),
13730 ibfd->filename);
13731 }
13732
13733 /* Allocate space for the glue sections now that we've sized them. */
13734 bfd_elf32_arm_allocate_interworking_sections (info);
13735
13736 /* For every jump slot reserved in the sgotplt, reloc_count is
13737 incremented. However, when we reserve space for TLS descriptors,
13738 it's not incremented, so in order to compute the space reserved
13739 for them, it suffices to multiply the reloc count by the jump
13740 slot size. */
13741 if (htab->root.srelplt)
13742 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
13743
13744 if (htab->tls_trampoline)
13745 {
13746 if (htab->root.splt->size == 0)
13747 htab->root.splt->size += htab->plt_header_size;
13748
13749 htab->tls_trampoline = htab->root.splt->size;
13750 htab->root.splt->size += htab->plt_entry_size;
13751
13752 /* If we're not using lazy TLS relocations, don't generate the
13753 PLT and GOT entries they require. */
13754 if (!(info->flags & DF_BIND_NOW))
13755 {
13756 htab->dt_tlsdesc_got = htab->root.sgot->size;
13757 htab->root.sgot->size += 4;
13758
13759 htab->dt_tlsdesc_plt = htab->root.splt->size;
13760 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13761 }
13762 }
13763
13764 /* The check_relocs and adjust_dynamic_symbol entry points have
13765 determined the sizes of the various dynamic sections. Allocate
13766 memory for them. */
13767 plt = FALSE;
13768 relocs = FALSE;
13769 for (s = dynobj->sections; s != NULL; s = s->next)
13770 {
13771 const char * name;
13772
13773 if ((s->flags & SEC_LINKER_CREATED) == 0)
13774 continue;
13775
13776 /* It's OK to base decisions on the section name, because none
13777 of the dynobj section names depend upon the input files. */
13778 name = bfd_get_section_name (dynobj, s);
13779
13780 if (s == htab->root.splt)
13781 {
13782 /* Remember whether there is a PLT. */
13783 plt = s->size != 0;
13784 }
13785 else if (CONST_STRNEQ (name, ".rel"))
13786 {
13787 if (s->size != 0)
13788 {
13789 /* Remember whether there are any reloc sections other
13790 than .rel(a).plt and .rela.plt.unloaded. */
13791 if (s != htab->root.srelplt && s != htab->srelplt2)
13792 relocs = TRUE;
13793
13794 /* We use the reloc_count field as a counter if we need
13795 to copy relocs into the output file. */
13796 s->reloc_count = 0;
13797 }
13798 }
13799 else if (s != htab->root.sgot
13800 && s != htab->root.sgotplt
13801 && s != htab->root.iplt
13802 && s != htab->root.igotplt
13803 && s != htab->sdynbss)
13804 {
13805 /* It's not one of our sections, so don't allocate space. */
13806 continue;
13807 }
13808
13809 if (s->size == 0)
13810 {
13811 /* If we don't need this section, strip it from the
13812 output file. This is mostly to handle .rel(a).bss and
13813 .rel(a).plt. We must create both sections in
13814 create_dynamic_sections, because they must be created
13815 before the linker maps input sections to output
13816 sections. The linker does that before
13817 adjust_dynamic_symbol is called, and it is that
13818 function which decides whether anything needs to go
13819 into these sections. */
13820 s->flags |= SEC_EXCLUDE;
13821 continue;
13822 }
13823
13824 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13825 continue;
13826
13827 /* Allocate memory for the section contents. */
13828 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13829 if (s->contents == NULL)
13830 return FALSE;
13831 }
13832
13833 if (elf_hash_table (info)->dynamic_sections_created)
13834 {
13835 /* Add some entries to the .dynamic section. We fill in the
13836 values later, in elf32_arm_finish_dynamic_sections, but we
13837 must add the entries now so that we get the correct size for
13838 the .dynamic section. The DT_DEBUG entry is filled in by the
13839 dynamic linker and used by the debugger. */
13840 #define add_dynamic_entry(TAG, VAL) \
13841 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
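/* For example, add_dynamic_entry (DT_DEBUG, 0) simply expands to
   _bfd_elf_add_dynamic_entry (info, DT_DEBUG, 0).  */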
13842
13843 if (info->executable)
13844 {
13845 if (!add_dynamic_entry (DT_DEBUG, 0))
13846 return FALSE;
13847 }
13848
13849 if (plt)
13850 {
13851 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13852 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13853 || !add_dynamic_entry (DT_PLTREL,
13854 htab->use_rel ? DT_REL : DT_RELA)
13855 || !add_dynamic_entry (DT_JMPREL, 0))
13856 return FALSE;
13857
13858 if (htab->dt_tlsdesc_plt
13859 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
13860 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
13861 return FALSE;
13862 }
13863
13864 if (relocs)
13865 {
13866 if (htab->use_rel)
13867 {
13868 if (!add_dynamic_entry (DT_REL, 0)
13869 || !add_dynamic_entry (DT_RELSZ, 0)
13870 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13871 return FALSE;
13872 }
13873 else
13874 {
13875 if (!add_dynamic_entry (DT_RELA, 0)
13876 || !add_dynamic_entry (DT_RELASZ, 0)
13877 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13878 return FALSE;
13879 }
13880 }
13881
13882 /* If any dynamic relocs apply to a read-only section,
13883 then we need a DT_TEXTREL entry. */
13884 if ((info->flags & DF_TEXTREL) == 0)
13885 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13886 info);
13887
13888 if ((info->flags & DF_TEXTREL) != 0)
13889 {
13890 if (!add_dynamic_entry (DT_TEXTREL, 0))
13891 return FALSE;
13892 }
13893 if (htab->vxworks_p
13894 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13895 return FALSE;
13896 }
13897 #undef add_dynamic_entry
13898
13899 return TRUE;
13900 }
13901
13902 /* Size sections even though they're not dynamic. We use this hook to
13903 set up _TLS_MODULE_BASE_, if needed. */
13904
13905 static bfd_boolean
13906 elf32_arm_always_size_sections (bfd *output_bfd,
13907 struct bfd_link_info *info)
13908 {
13909 asection *tls_sec;
13910
13911 if (info->relocatable)
13912 return TRUE;
13913
13914 tls_sec = elf_hash_table (info)->tls_sec;
13915
13916 if (tls_sec)
13917 {
13918 struct elf_link_hash_entry *tlsbase;
13919
13920 tlsbase = elf_link_hash_lookup
13921 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13922
13923 if (tlsbase)
13924 {
13925 struct bfd_link_hash_entry *bh = NULL;
13926 const struct elf_backend_data *bed
13927 = get_elf_backend_data (output_bfd);
13928
13929 if (!(_bfd_generic_link_add_one_symbol
13930 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13931 tls_sec, 0, NULL, FALSE,
13932 bed->collect, &bh)))
13933 return FALSE;
13934
13935 tlsbase->type = STT_TLS;
13936 tlsbase = (struct elf_link_hash_entry *)bh;
13937 tlsbase->def_regular = 1;
13938 tlsbase->other = STV_HIDDEN;
13939 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13940 }
13941 }
13942 return TRUE;
13943 }
13944
13945 /* Finish up dynamic symbol handling. We set the contents of various
13946 dynamic sections here. */
13947
13948 static bfd_boolean
13949 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13950 struct bfd_link_info * info,
13951 struct elf_link_hash_entry * h,
13952 Elf_Internal_Sym * sym)
13953 {
13954 struct elf32_arm_link_hash_table *htab;
13955 struct elf32_arm_link_hash_entry *eh;
13956
13957 htab = elf32_arm_hash_table (info);
13958 if (htab == NULL)
13959 return FALSE;
13960
13961 eh = (struct elf32_arm_link_hash_entry *) h;
13962
13963 if (h->plt.offset != (bfd_vma) -1)
13964 {
13965 if (!eh->is_iplt)
13966 {
13967 BFD_ASSERT (h->dynindx != -1);
13968 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13969 h->dynindx, 0);
13970 }
13971
13972 if (!h->def_regular)
13973 {
13974 /* Mark the symbol as undefined, rather than as defined in
13975 the .plt section. Leave the value alone. */
13976 sym->st_shndx = SHN_UNDEF;
13977 /* If the symbol is weak, we do need to clear the value.
13978 Otherwise, the PLT entry would provide a definition for
13979 the symbol even if the symbol wasn't defined anywhere,
13980 and so the symbol would never be NULL. */
13981 if (!h->ref_regular_nonweak)
13982 sym->st_value = 0;
13983 }
13984 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13985 {
13986 /* At least one non-call relocation references this .iplt entry,
13987 so the .iplt entry is the function's canonical address. */
13988 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13989 sym->st_target_internal = ST_BRANCH_TO_ARM;
13990 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13991 (output_bfd, htab->root.iplt->output_section));
13992 sym->st_value = (h->plt.offset
13993 + htab->root.iplt->output_section->vma
13994 + htab->root.iplt->output_offset);
13995 }
13996 }
13997
13998 if (h->needs_copy)
13999 {
14000 asection * s;
14001 Elf_Internal_Rela rel;
14002
14003 /* This symbol needs a copy reloc. Set it up. */
14004 BFD_ASSERT (h->dynindx != -1
14005 && (h->root.type == bfd_link_hash_defined
14006 || h->root.type == bfd_link_hash_defweak));
14007
14008 s = htab->srelbss;
14009 BFD_ASSERT (s != NULL);
14010
14011 rel.r_addend = 0;
14012 rel.r_offset = (h->root.u.def.value
14013 + h->root.u.def.section->output_section->vma
14014 + h->root.u.def.section->output_offset);
14015 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
14016 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
14017 }
14018
14019 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
14020 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
14021 to the ".got" section. */
14022 if (h == htab->root.hdynamic
14023 || (!htab->vxworks_p && h == htab->root.hgot))
14024 sym->st_shndx = SHN_ABS;
14025
14026 return TRUE;
14027 }
14028
14029 static void
14030 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14031 void *contents,
14032 const unsigned long *template, unsigned count)
14033 {
14034 unsigned ix;
14035
14036 for (ix = 0; ix != count; ix++)
14037 {
14038 unsigned long insn = template[ix];
14039
14040 /* Emit mov pc,rx if bx is not permitted. */
14041 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14042 insn = (insn & 0xf000000f) | 0x01a0f000;
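/* For example, "bx r3" (0xe12fff13) is rewritten as "mov pc, r3"
   (0xe1a0f003).  */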
14043 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14044 }
14045 }
14046
14047 /* Finish up the dynamic sections. */
14048
14049 static bfd_boolean
14050 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14051 {
14052 bfd * dynobj;
14053 asection * sgot;
14054 asection * sdyn;
14055 struct elf32_arm_link_hash_table *htab;
14056
14057 htab = elf32_arm_hash_table (info);
14058 if (htab == NULL)
14059 return FALSE;
14060
14061 dynobj = elf_hash_table (info)->dynobj;
14062
14063 sgot = htab->root.sgotplt;
14064 /* A broken linker script might have discarded the dynamic sections.
14065 Catch this here so that we do not seg-fault later on. */
14066 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14067 return FALSE;
14068 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
14069
14070 if (elf_hash_table (info)->dynamic_sections_created)
14071 {
14072 asection *splt;
14073 Elf32_External_Dyn *dyncon, *dynconend;
14074
14075 splt = htab->root.splt;
14076 BFD_ASSERT (splt != NULL && sdyn != NULL);
14077 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14078
14079 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14080 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14081
14082 for (; dyncon < dynconend; dyncon++)
14083 {
14084 Elf_Internal_Dyn dyn;
14085 const char * name;
14086 asection * s;
14087
14088 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14089
14090 switch (dyn.d_tag)
14091 {
14092 unsigned int type;
14093
14094 default:
14095 if (htab->vxworks_p
14096 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14097 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14098 break;
14099
14100 case DT_HASH:
14101 name = ".hash";
14102 goto get_vma_if_bpabi;
14103 case DT_STRTAB:
14104 name = ".dynstr";
14105 goto get_vma_if_bpabi;
14106 case DT_SYMTAB:
14107 name = ".dynsym";
14108 goto get_vma_if_bpabi;
14109 case DT_VERSYM:
14110 name = ".gnu.version";
14111 goto get_vma_if_bpabi;
14112 case DT_VERDEF:
14113 name = ".gnu.version_d";
14114 goto get_vma_if_bpabi;
14115 case DT_VERNEED:
14116 name = ".gnu.version_r";
14117 goto get_vma_if_bpabi;
14118
14119 case DT_PLTGOT:
14120 name = ".got";
14121 goto get_vma;
14122 case DT_JMPREL:
14123 name = RELOC_SECTION (htab, ".plt");
14124 get_vma:
14125 s = bfd_get_section_by_name (output_bfd, name);
14126 if (s == NULL)
14127 {
14128 /* PR ld/14397: Issue an error message if a required section is missing. */
14129 (*_bfd_error_handler)
14130 (_("error: required section '%s' not found in the linker script"), name);
14131 bfd_set_error (bfd_error_invalid_operation);
14132 return FALSE;
14133 }
14134 if (!htab->symbian_p)
14135 dyn.d_un.d_ptr = s->vma;
14136 else
14137 /* In the BPABI, tags in the PT_DYNAMIC section point
14138 at the file offset, not the memory address, for the
14139 convenience of the post linker. */
14140 dyn.d_un.d_ptr = s->filepos;
14141 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14142 break;
14143
14144 get_vma_if_bpabi:
14145 if (htab->symbian_p)
14146 goto get_vma;
14147 break;
14148
14149 case DT_PLTRELSZ:
14150 s = htab->root.srelplt;
14151 BFD_ASSERT (s != NULL);
14152 dyn.d_un.d_val = s->size;
14153 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14154 break;
14155
14156 case DT_RELSZ:
14157 case DT_RELASZ:
14158 if (!htab->symbian_p)
14159 {
14160 /* My reading of the SVR4 ABI indicates that the
14161 procedure linkage table relocs (DT_JMPREL) should be
14162 included in the overall relocs (DT_REL). This is
14163 what Solaris does. However, UnixWare cannot handle
14164 that case. Therefore, we override the DT_RELSZ entry
14165 here to make it not include the JMPREL relocs. Since
14166 the linker script arranges for .rel(a).plt to follow all
14167 other relocation sections, we don't have to worry
14168 about changing the DT_REL entry. */
14169 s = htab->root.srelplt;
14170 if (s != NULL)
14171 dyn.d_un.d_val -= s->size;
14172 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14173 break;
14174 }
14175 /* Fall through. */
14176
14177 case DT_REL:
14178 case DT_RELA:
14179 /* In the BPABI, the DT_REL tag must point at the file
14180 offset, not the VMA, of the first relocation
14181 section. So, we use code similar to that in
14182 elflink.c, but do not check for SHF_ALLOC on the
14183 relocation section, since relocation sections are
14184 never allocated under the BPABI. The comments above
14185 about UnixWare notwithstanding, we include all of the
14186 relocations here. */
14187 if (htab->symbian_p)
14188 {
14189 unsigned int i;
14190 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14191 ? SHT_REL : SHT_RELA);
14192 dyn.d_un.d_val = 0;
14193 for (i = 1; i < elf_numsections (output_bfd); i++)
14194 {
14195 Elf_Internal_Shdr *hdr
14196 = elf_elfsections (output_bfd)[i];
14197 if (hdr->sh_type == type)
14198 {
14199 if (dyn.d_tag == DT_RELSZ
14200 || dyn.d_tag == DT_RELASZ)
14201 dyn.d_un.d_val += hdr->sh_size;
14202 else if ((ufile_ptr) hdr->sh_offset
14203 <= dyn.d_un.d_val - 1)
14204 dyn.d_un.d_val = hdr->sh_offset;
14205 }
14206 }
14207 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14208 }
14209 break;
14210
14211 case DT_TLSDESC_PLT:
14212 s = htab->root.splt;
14213 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14214 + htab->dt_tlsdesc_plt);
14215 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14216 break;
14217
14218 case DT_TLSDESC_GOT:
14219 s = htab->root.sgot;
14220 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14221 + htab->dt_tlsdesc_got);
14222 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14223 break;
14224
14225 /* Set the bottom bit of DT_INIT/FINI if the
14226 corresponding function is Thumb. */
14227 case DT_INIT:
14228 name = info->init_function;
14229 goto get_sym;
14230 case DT_FINI:
14231 name = info->fini_function;
14232 get_sym:
14233 /* If it wasn't set by elf_bfd_final_link
14234 then there is nothing to adjust. */
14235 if (dyn.d_un.d_val != 0)
14236 {
14237 struct elf_link_hash_entry * eh;
14238
14239 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14240 FALSE, FALSE, TRUE);
14241 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14242 {
14243 dyn.d_un.d_val |= 1;
14244 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14245 }
14246 }
14247 break;
14248 }
14249 }
14250
14251 /* Fill in the first entry in the procedure linkage table. */
14252 if (splt->size > 0 && htab->plt_header_size)
14253 {
14254 const bfd_vma *plt0_entry;
14255 bfd_vma got_address, plt_address, got_displacement;
14256
14257 /* Calculate the addresses of the GOT and PLT. */
14258 got_address = sgot->output_section->vma + sgot->output_offset;
14259 plt_address = splt->output_section->vma + splt->output_offset;
14260
14261 if (htab->vxworks_p)
14262 {
14263 /* The VxWorks GOT is relocated by the dynamic linker.
14264 Therefore, we must emit relocations rather than simply
14265 computing the values now. */
14266 Elf_Internal_Rela rel;
14267
14268 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14269 put_arm_insn (htab, output_bfd, plt0_entry[0],
14270 splt->contents + 0);
14271 put_arm_insn (htab, output_bfd, plt0_entry[1],
14272 splt->contents + 4);
14273 put_arm_insn (htab, output_bfd, plt0_entry[2],
14274 splt->contents + 8);
14275 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14276
14277 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14278 rel.r_offset = plt_address + 12;
14279 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14280 rel.r_addend = 0;
14281 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14282 htab->srelplt2->contents);
14283 }
14284 else if (htab->nacl_p)
14285 {
14286 unsigned int i;
14287
14288 got_displacement = got_address + 8 - (plt_address + 16);
14289
14290 put_arm_insn (htab, output_bfd,
14291 elf32_arm_nacl_plt0_entry[0]
14292 | arm_movw_immediate (got_displacement),
14293 splt->contents + 0);
14294 put_arm_insn (htab, output_bfd,
14295 elf32_arm_nacl_plt0_entry[1]
14296 | arm_movt_immediate (got_displacement),
14297 splt->contents + 4);
14298 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14299 put_arm_insn (htab, output_bfd,
14300 elf32_arm_nacl_plt0_entry[i],
14301 splt->contents + (i * 4));
14302 }
14303 else
14304 {
14305 got_displacement = got_address - (plt_address + 16);
14306
14307 plt0_entry = elf32_arm_plt0_entry;
14308 put_arm_insn (htab, output_bfd, plt0_entry[0],
14309 splt->contents + 0);
14310 put_arm_insn (htab, output_bfd, plt0_entry[1],
14311 splt->contents + 4);
14312 put_arm_insn (htab, output_bfd, plt0_entry[2],
14313 splt->contents + 8);
14314 put_arm_insn (htab, output_bfd, plt0_entry[3],
14315 splt->contents + 12);
14316
14317 #ifdef FOUR_WORD_PLT
14318 /* The displacement value goes in the otherwise-unused
14319 last word of the second entry. */
14320 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14321 #else
14322 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
14323 #endif
14324 }
14325 }
14326
14327 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14328 really seem like the right value. */
14329 if (splt->output_section->owner == output_bfd)
14330 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14331
14332 if (htab->dt_tlsdesc_plt)
14333 {
14334 bfd_vma got_address
14335 = sgot->output_section->vma + sgot->output_offset;
14336 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14337 + htab->root.sgot->output_offset);
14338 bfd_vma plt_address
14339 = splt->output_section->vma + splt->output_offset;
14340
14341 arm_put_trampoline (htab, output_bfd,
14342 splt->contents + htab->dt_tlsdesc_plt,
14343 dl_tlsdesc_lazy_trampoline, 6);
14344
14345 bfd_put_32 (output_bfd,
14346 gotplt_address + htab->dt_tlsdesc_got
14347 - (plt_address + htab->dt_tlsdesc_plt)
14348 - dl_tlsdesc_lazy_trampoline[6],
14349 splt->contents + htab->dt_tlsdesc_plt + 24);
14350 bfd_put_32 (output_bfd,
14351 got_address - (plt_address + htab->dt_tlsdesc_plt)
14352 - dl_tlsdesc_lazy_trampoline[7],
14353 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14354 }
14355
14356 if (htab->tls_trampoline)
14357 {
14358 arm_put_trampoline (htab, output_bfd,
14359 splt->contents + htab->tls_trampoline,
14360 tls_trampoline, 3);
14361 #ifdef FOUR_WORD_PLT
14362 bfd_put_32 (output_bfd, 0x00000000,
14363 splt->contents + htab->tls_trampoline + 12);
14364 #endif
14365 }
14366
14367 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14368 {
14369 /* Correct the .rel(a).plt.unloaded relocations. They will have
14370 incorrect symbol indexes. */
14371 int num_plts;
14372 unsigned char *p;
14373
14374 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14375 / htab->plt_entry_size);
14376 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14377
14378 for (; num_plts; num_plts--)
14379 {
14380 Elf_Internal_Rela rel;
14381
14382 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14383 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14384 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14385 p += RELOC_SIZE (htab);
14386
14387 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14388 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14389 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14390 p += RELOC_SIZE (htab);
14391 }
14392 }
14393 }
14394
14395 /* Fill in the first three entries in the global offset table. */
14396 if (sgot)
14397 {
14398 if (sgot->size > 0)
14399 {
14400 if (sdyn == NULL)
14401 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14402 else
14403 bfd_put_32 (output_bfd,
14404 sdyn->output_section->vma + sdyn->output_offset,
14405 sgot->contents);
14406 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14407 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14408 }
14409
14410 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14411 }
14412
14413 return TRUE;
14414 }
14415
14416 static void
14417 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info)
14418 {
14419 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14420 struct elf32_arm_link_hash_table *globals;
14421
14422 i_ehdrp = elf_elfheader (abfd);
14423
14424 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14425 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14426 else
14427 i_ehdrp->e_ident[EI_OSABI] = 0;
14428 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14429
14430 if (link_info)
14431 {
14432 globals = elf32_arm_hash_table (link_info);
14433 if (globals != NULL && globals->byteswap_code)
14434 i_ehdrp->e_flags |= EF_ARM_BE8;
14435 }
14436
14437 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14438 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14439 {
14440 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
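/* A non-zero Tag_ABI_VFP_args value normally indicates that floating-point
   arguments are passed in VFP registers, i.e. the hard-float variant of
   the ABI.  */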
14441 if (abi)
14442 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14443 else
14444 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14445 }
14446 }
14447
14448 static enum elf_reloc_type_class
14449 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14450 {
14451 switch ((int) ELF32_R_TYPE (rela->r_info))
14452 {
14453 case R_ARM_RELATIVE:
14454 return reloc_class_relative;
14455 case R_ARM_JUMP_SLOT:
14456 return reloc_class_plt;
14457 case R_ARM_COPY:
14458 return reloc_class_copy;
14459 default:
14460 return reloc_class_normal;
14461 }
14462 }
14463
14464 static void
14465 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14466 {
14467 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14468 }
14469
14470 /* Return TRUE if this is an unwinding table entry. */
14471
14472 static bfd_boolean
14473 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14474 {
14475 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14476 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14477 }
14478
14479
14480 /* Set the type and flags for an ARM section. We do this based on
14481 the section name, which is a hack, but ought to work. */
14482
14483 static bfd_boolean
14484 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14485 {
14486 const char * name;
14487
14488 name = bfd_get_section_name (abfd, sec);
14489
14490 if (is_arm_elf_unwind_section_name (abfd, name))
14491 {
14492 hdr->sh_type = SHT_ARM_EXIDX;
14493 hdr->sh_flags |= SHF_LINK_ORDER;
14494 }
14495 return TRUE;
14496 }
14497
14498 /* Handle an ARM specific section when reading an object file. This is
14499 called when bfd_section_from_shdr finds a section with an unknown
14500 type. */
14501
14502 static bfd_boolean
14503 elf32_arm_section_from_shdr (bfd *abfd,
14504 Elf_Internal_Shdr * hdr,
14505 const char *name,
14506 int shindex)
14507 {
14508 /* There ought to be a place to keep ELF backend specific flags, but
14509 at the moment there isn't one. We just keep track of the
14510 sections by their name, instead. Fortunately, the ABI gives
14511 names for all the ARM specific sections, so we will probably get
14512 away with this. */
14513 switch (hdr->sh_type)
14514 {
14515 case SHT_ARM_EXIDX:
14516 case SHT_ARM_PREEMPTMAP:
14517 case SHT_ARM_ATTRIBUTES:
14518 break;
14519
14520 default:
14521 return FALSE;
14522 }
14523
14524 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14525 return FALSE;
14526
14527 return TRUE;
14528 }
14529
14530 static _arm_elf_section_data *
14531 get_arm_elf_section_data (asection * sec)
14532 {
14533 if (sec && sec->owner && is_arm_elf (sec->owner))
14534 return elf32_arm_section_data (sec);
14535 else
14536 return NULL;
14537 }
14538
14539 typedef struct
14540 {
14541 void *flaginfo;
14542 struct bfd_link_info *info;
14543 asection *sec;
14544 int sec_shndx;
14545 int (*func) (void *, const char *, Elf_Internal_Sym *,
14546 asection *, struct elf_link_hash_entry *);
14547 } output_arch_syminfo;
14548
14549 enum map_symbol_type
14550 {
14551 ARM_MAP_ARM,
14552 ARM_MAP_THUMB,
14553 ARM_MAP_DATA
14554 };
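/* These correspond to the AAELF mapping symbols "$a" (ARM code),
   "$t" (Thumb code) and "$d" (data); see the names[] array in
   elf32_arm_output_map_sym below.  */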
14555
14556
14557 /* Output a single mapping symbol. */
14558
14559 static bfd_boolean
14560 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14561 enum map_symbol_type type,
14562 bfd_vma offset)
14563 {
14564 static const char *names[3] = {"$a", "$t", "$d"};
14565 Elf_Internal_Sym sym;
14566
14567 sym.st_value = osi->sec->output_section->vma
14568 + osi->sec->output_offset
14569 + offset;
14570 sym.st_size = 0;
14571 sym.st_other = 0;
14572 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14573 sym.st_shndx = osi->sec_shndx;
14574 sym.st_target_internal = 0;
14575 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
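/* names[type][1] is the single distinguishing character ('a', 't' or 'd')
   recorded in the section map by the call above.  */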
14576 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14577 }
14578
14579 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14580 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14581
14582 static bfd_boolean
14583 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14584 bfd_boolean is_iplt_entry_p,
14585 union gotplt_union *root_plt,
14586 struct arm_plt_info *arm_plt)
14587 {
14588 struct elf32_arm_link_hash_table *htab;
14589 bfd_vma addr, plt_header_size;
14590
14591 if (root_plt->offset == (bfd_vma) -1)
14592 return TRUE;
14593
14594 htab = elf32_arm_hash_table (osi->info);
14595 if (htab == NULL)
14596 return FALSE;
14597
14598 if (is_iplt_entry_p)
14599 {
14600 osi->sec = htab->root.iplt;
14601 plt_header_size = 0;
14602 }
14603 else
14604 {
14605 osi->sec = htab->root.splt;
14606 plt_header_size = htab->plt_header_size;
14607 }
14608 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14609 (osi->info->output_bfd, osi->sec->output_section));
14610
14611 addr = root_plt->offset & -2;
14612 if (htab->symbian_p)
14613 {
14614 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14615 return FALSE;
14616 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14617 return FALSE;
14618 }
14619 else if (htab->vxworks_p)
14620 {
14621 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14622 return FALSE;
14623 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14624 return FALSE;
14625 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14626 return FALSE;
14627 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14628 return FALSE;
14629 }
14630 else if (htab->nacl_p)
14631 {
14632 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14633 return FALSE;
14634 }
14635 else
14636 {
14637 bfd_boolean thumb_stub_p;
14638
14639 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14640 if (thumb_stub_p)
14641 {
14642 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14643 return FALSE;
14644 }
14645 #ifdef FOUR_WORD_PLT
14646 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14647 return FALSE;
14648 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14649 return FALSE;
14650 #else
14651 /* A three-word PLT with no Thumb thunk contains only ARM code,
14652 so we only need to output a mapping symbol for the first PLT entry
14653 and for entries with Thumb thunks. */
14654 if (thumb_stub_p || addr == plt_header_size)
14655 {
14656 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14657 return FALSE;
14658 }
14659 #endif
14660 }
14661
14662 return TRUE;
14663 }
14664
14665 /* Output mapping symbols for PLT entries associated with H. */
14666
14667 static bfd_boolean
14668 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14669 {
14670 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14671 struct elf32_arm_link_hash_entry *eh;
14672
14673 if (h->root.type == bfd_link_hash_indirect)
14674 return TRUE;
14675
14676 if (h->root.type == bfd_link_hash_warning)
14677 /* When warning symbols are created, they **replace** the "real"
14678 entry in the hash table, thus we never get to see the real
14679 symbol in a hash traversal. So look at it now. */
14680 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14681
14682 eh = (struct elf32_arm_link_hash_entry *) h;
14683 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14684 &h->plt, &eh->plt);
14685 }
14686
14687 /* Output a single local symbol for a generated stub. */
14688
14689 static bfd_boolean
14690 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14691 bfd_vma offset, bfd_vma size)
14692 {
14693 Elf_Internal_Sym sym;
14694
14695 sym.st_value = osi->sec->output_section->vma
14696 + osi->sec->output_offset
14697 + offset;
14698 sym.st_size = size;
14699 sym.st_other = 0;
14700 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14701 sym.st_shndx = osi->sec_shndx;
14702 sym.st_target_internal = 0;
14703 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
14704 }
14705
14706 static bfd_boolean
14707 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14708 void * in_arg)
14709 {
14710 struct elf32_arm_stub_hash_entry *stub_entry;
14711 asection *stub_sec;
14712 bfd_vma addr;
14713 char *stub_name;
14714 output_arch_syminfo *osi;
14715 const insn_sequence *template_sequence;
14716 enum stub_insn_type prev_type;
14717 int size;
14718 int i;
14719 enum map_symbol_type sym_type;
14720
14721 /* Massage our args to the form they really have. */
14722 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14723 osi = (output_arch_syminfo *) in_arg;
14724
14725 stub_sec = stub_entry->stub_sec;
14726
14727 /* Ensure this stub is attached to the current section being
14728 processed. */
14729 if (stub_sec != osi->sec)
14730 return TRUE;
14731
14732 addr = (bfd_vma) stub_entry->stub_offset;
14733 stub_name = stub_entry->output_name;
14734
14735 template_sequence = stub_entry->stub_template;
14736 switch (template_sequence[0].type)
14737 {
14738 case ARM_TYPE:
14739 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14740 return FALSE;
14741 break;
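/* As elsewhere, a Thumb symbol is marked by setting the low bit of
   its address.  */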
14742 case THUMB16_TYPE:
14743 case THUMB32_TYPE:
14744 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14745 stub_entry->stub_size))
14746 return FALSE;
14747 break;
14748 default:
14749 BFD_FAIL ();
14750 return 0;
14751 }
14752
14753 prev_type = DATA_TYPE;
14754 size = 0;
14755 for (i = 0; i < stub_entry->stub_template_size; i++)
14756 {
14757 switch (template_sequence[i].type)
14758 {
14759 case ARM_TYPE:
14760 sym_type = ARM_MAP_ARM;
14761 break;
14762
14763 case THUMB16_TYPE:
14764 case THUMB32_TYPE:
14765 sym_type = ARM_MAP_THUMB;
14766 break;
14767
14768 case DATA_TYPE:
14769 sym_type = ARM_MAP_DATA;
14770 break;
14771
14772 default:
14773 BFD_FAIL ();
14774 return FALSE;
14775 }
14776
14777 if (template_sequence[i].type != prev_type)
14778 {
14779 prev_type = template_sequence[i].type;
14780 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
14781 return FALSE;
14782 }
14783
14784 switch (template_sequence[i].type)
14785 {
14786 case ARM_TYPE:
14787 case THUMB32_TYPE:
14788 size += 4;
14789 break;
14790
14791 case THUMB16_TYPE:
14792 size += 2;
14793 break;
14794
14795 case DATA_TYPE:
14796 size += 4;
14797 break;
14798
14799 default:
14800 BFD_FAIL ();
14801 return FALSE;
14802 }
14803 }
14804
14805 return TRUE;
14806 }
14807
14808 /* Output mapping symbols for linker-generated sections,
14809 and for those data-only sections that do not have a
14810 $d mapping symbol. */
14811
14812 static bfd_boolean
14813 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14814 struct bfd_link_info *info,
14815 void *flaginfo,
14816 int (*func) (void *, const char *,
14817 Elf_Internal_Sym *,
14818 asection *,
14819 struct elf_link_hash_entry *))
14820 {
14821 output_arch_syminfo osi;
14822 struct elf32_arm_link_hash_table *htab;
14823 bfd_vma offset;
14824 bfd_size_type size;
14825 bfd *input_bfd;
14826
14827 htab = elf32_arm_hash_table (info);
14828 if (htab == NULL)
14829 return FALSE;
14830
14831 check_use_blx (htab);
14832
14833 osi.flaginfo = flaginfo;
14834 osi.info = info;
14835 osi.func = func;
14836
14837 /* Add a $d mapping symbol to data-only sections that
14838 don't have any mapping symbol. This may result in (harmless) redundant
14839 mapping symbols. */
14840 for (input_bfd = info->input_bfds;
14841 input_bfd != NULL;
14842 input_bfd = input_bfd->link_next)
14843 {
14844 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14845 for (osi.sec = input_bfd->sections;
14846 osi.sec != NULL;
14847 osi.sec = osi.sec->next)
14848 {
14849 if (osi.sec->output_section != NULL
14850 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14851 != 0)
14852 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14853 == SEC_HAS_CONTENTS
14854 && get_arm_elf_section_data (osi.sec) != NULL
14855 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14856 && osi.sec->size > 0
14857 && (osi.sec->flags & SEC_EXCLUDE) == 0)
14858 {
14859 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14860 (output_bfd, osi.sec->output_section);
14861 if (osi.sec_shndx != (int)SHN_BAD)
14862 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14863 }
14864 }
14865 }
14866
14867 /* ARM->Thumb glue. */
14868 if (htab->arm_glue_size > 0)
14869 {
14870 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14871 ARM2THUMB_GLUE_SECTION_NAME);
14872
14873 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14874 (output_bfd, osi.sec->output_section);
14875 if (info->shared || htab->root.is_relocatable_executable
14876 || htab->pic_veneer)
14877 size = ARM2THUMB_PIC_GLUE_SIZE;
14878 else if (htab->use_blx)
14879 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14880 else
14881 size = ARM2THUMB_STATIC_GLUE_SIZE;
14882
14883 for (offset = 0; offset < htab->arm_glue_size; offset += size)
14884 {
14885 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14886 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14887 }
14888 }
14889
14890 /* Thumb->ARM glue. */
14891 if (htab->thumb_glue_size > 0)
14892 {
14893 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14894 THUMB2ARM_GLUE_SECTION_NAME);
14895
14896 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14897 (output_bfd, osi.sec->output_section);
14898 size = THUMB2ARM_GLUE_SIZE;
14899
14900 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
14901 {
14902 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14903 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14904 }
14905 }
14906
14907 /* ARMv4 BX veneers. */
14908 if (htab->bx_glue_size > 0)
14909 {
14910 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
14911 ARM_BX_GLUE_SECTION_NAME);
14912
14913 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14914 (output_bfd, osi.sec->output_section);
14915
14916 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14917 }
14918
14919 /* Long call stubs. */
14920 if (htab->stub_bfd && htab->stub_bfd->sections)
14921 {
14922 asection* stub_sec;
14923
14924 for (stub_sec = htab->stub_bfd->sections;
14925 stub_sec != NULL;
14926 stub_sec = stub_sec->next)
14927 {
14928 /* Ignore non-stub sections. */
14929 if (!strstr (stub_sec->name, STUB_SUFFIX))
14930 continue;
14931
14932 osi.sec = stub_sec;
14933
14934 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14935 (output_bfd, osi.sec->output_section);
14936
14937 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14938 }
14939 }
14940
14941 /* Finally, output mapping symbols for the PLT. */
14942 if (htab->root.splt && htab->root.splt->size > 0)
14943 {
14944 osi.sec = htab->root.splt;
14945 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14946 (output_bfd, osi.sec->output_section));
14947
14948 /* Output mapping symbols for the plt header. SymbianOS does not have a
14949 plt header. */
14950 if (htab->vxworks_p)
14951 {
14952 /* VxWorks shared libraries have no PLT header. */
14953 if (!info->shared)
14954 {
14955 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14956 return FALSE;
14957 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14958 return FALSE;
14959 }
14960 }
14961 else if (htab->nacl_p)
14962 {
14963 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14964 return FALSE;
14965 }
14966 else if (!htab->symbian_p)
14967 {
14968 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14969 return FALSE;
14970 #ifndef FOUR_WORD_PLT
14971 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
14972 return FALSE;
14973 #endif
14974 }
14975 }
14976 if ((htab->root.splt && htab->root.splt->size > 0)
14977 || (htab->root.iplt && htab->root.iplt->size > 0))
14978 {
14979 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14980 for (input_bfd = info->input_bfds;
14981 input_bfd != NULL;
14982 input_bfd = input_bfd->link_next)
14983 {
14984 struct arm_local_iplt_info **local_iplt;
14985 unsigned int i, num_syms;
14986
14987 local_iplt = elf32_arm_local_iplt (input_bfd);
14988 if (local_iplt != NULL)
14989 {
14990 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14991 for (i = 0; i < num_syms; i++)
14992 if (local_iplt[i] != NULL
14993 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14994 &local_iplt[i]->root,
14995 &local_iplt[i]->arm))
14996 return FALSE;
14997 }
14998 }
14999 }
15000 if (htab->dt_tlsdesc_plt != 0)
15001 {
15002 /* Mapping symbols for the lazy tls trampoline. */
15003 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
15004 return FALSE;
15005
15006 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15007 htab->dt_tlsdesc_plt + 24))
15008 return FALSE;
15009 }
15010 if (htab->tls_trampoline != 0)
15011 {
15012 /* Mapping symbols for the tls trampoline. */
15013 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
15014 return FALSE;
15015 #ifdef FOUR_WORD_PLT
15016 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15017 htab->tls_trampoline + 12))
15018 return FALSE;
15019 #endif
15020 }
15021
15022 return TRUE;
15023 }
15024
15025 /* Allocate target specific section data. */
15026
15027 static bfd_boolean
15028 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15029 {
15030 if (!sec->used_by_bfd)
15031 {
15032 _arm_elf_section_data *sdata;
15033 bfd_size_type amt = sizeof (*sdata);
15034
15035 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15036 if (sdata == NULL)
15037 return FALSE;
15038 sec->used_by_bfd = sdata;
15039 }
15040
15041 return _bfd_elf_new_section_hook (abfd, sec);
15042 }
15043
15044
15045 /* Used to order a list of mapping symbols by address. */
15046
15047 static int
15048 elf32_arm_compare_mapping (const void * a, const void * b)
15049 {
15050 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15051 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15052
15053 if (amap->vma > bmap->vma)
15054 return 1;
15055 else if (amap->vma < bmap->vma)
15056 return -1;
15057 else if (amap->type > bmap->type)
15058 /* Ensure results do not depend on the host qsort for objects with
15059 multiple mapping symbols at the same address by sorting on type
15060 after vma. */
15061 return 1;
15062 else if (amap->type < bmap->type)
15063 return -1;
15064 else
15065 return 0;
15066 }
15067
15068 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
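/* For example, offset_prel31 (0x80000004, 8) yields 0x8000000c: the
   low 31 bits advance by 8 while bit 31, which carries a separate
   meaning in .ARM.exidx entries, is preserved.  */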
15069
15070 static unsigned long
15071 offset_prel31 (unsigned long addr, bfd_vma offset)
15072 {
15073 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15074 }
15075
15076 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15077 relocations. */
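/* Each .ARM.exidx entry is a pair of words: the first is a prel31
   offset to the function it covers, and the second is either
   EXIDX_CANTUNWIND (0x1), an inline unwind description (bit 31 set),
   or a prel31 offset to an .ARM.extab entry (bit 31 clear).  Only
   the prel31 words need adjusting here.  */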
15078
15079 static void
15080 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15081 {
15082 unsigned long first_word = bfd_get_32 (output_bfd, from);
15083 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15084
15085 /* High bit of first word is supposed to be zero. */
15086 if ((first_word & 0x80000000ul) == 0)
15087 first_word = offset_prel31 (first_word, offset);
15088
15089 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
15090 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15091 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15092 second_word = offset_prel31 (second_word, offset);
15093
15094 bfd_put_32 (output_bfd, first_word, to);
15095 bfd_put_32 (output_bfd, second_word, to + 4);
15096 }
15097
15098 /* Data for make_branch_to_a8_stub(). */
15099
15100 struct a8_branch_to_stub_data
15101 {
15102 asection *writing_section;
15103 bfd_byte *contents;
15104 };
15105
15106
15107 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15108 places for a particular section. */
15109
15110 static bfd_boolean
15111 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15112 void *in_arg)
15113 {
15114 struct elf32_arm_stub_hash_entry *stub_entry;
15115 struct a8_branch_to_stub_data *data;
15116 bfd_byte *contents;
15117 unsigned long branch_insn;
15118 bfd_vma veneered_insn_loc, veneer_entry_loc;
15119 bfd_signed_vma branch_offset;
15120 bfd *abfd;
15121 unsigned int target;
15122
15123 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15124 data = (struct a8_branch_to_stub_data *) in_arg;
15125
15126 if (stub_entry->target_section != data->writing_section
15127 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15128 return TRUE;
15129
15130 contents = data->contents;
15131
15132 veneered_insn_loc = stub_entry->target_section->output_section->vma
15133 + stub_entry->target_section->output_offset
15134 + stub_entry->target_value;
15135
15136 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15137 + stub_entry->stub_sec->output_offset
15138 + stub_entry->stub_offset;
15139
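/* The offset encoded in a Thumb BLX is relative to Align(PC,4), since
   the branch changes to ARM state, so align the address of the
   veneered instruction down to a word boundary before computing the
   offset.  */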
15140 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15141 veneered_insn_loc &= ~3u;
15142
15143 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15144
15145 abfd = stub_entry->target_section->owner;
15146 target = stub_entry->target_value;
15147
15148 /* We attempt to avoid this condition by setting stubs_always_after_branch
15149 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15150 This check is just to be on the safe side... */
15151 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15152 {
15153 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15154 "allocated in unsafe location"), abfd);
15155 return FALSE;
15156 }
15157
15158 switch (stub_entry->stub_type)
15159 {
15160 case arm_stub_a8_veneer_b:
15161 case arm_stub_a8_veneer_b_cond:
15162 branch_insn = 0xf0009000;
15163 goto jump24;
15164
15165 case arm_stub_a8_veneer_blx:
15166 branch_insn = 0xf000e800;
15167 goto jump24;
15168
15169 case arm_stub_a8_veneer_bl:
15170 {
15171 unsigned int i1, j1, i2, j2, s;
15172
15173 branch_insn = 0xf000d000;
15174
15175 jump24:
15176 if (branch_offset < -16777216 || branch_offset > 16777214)
15177 {
15178 /* There's not much we can do apart from complain if this
15179 happens. */
15180 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15181 "of range (input file too large)"), abfd);
15182 return FALSE;
15183 }
15184
15185 /* i1 = not(j1 eor s), so:
15186 not i1 = j1 eor s
15187 j1 = (not i1) eor s. */
15188
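/* BRANCH_INSN holds the first Thumb halfword in its top 16 bits and
   the second halfword in its bottom 16 bits.  The pieces of the
   offset are placed as in the Thumb-2 BL/B.W encoding: imm11 in bits
   0-10, J2 in bit 11, J1 in bit 13, imm10 in bits 16-25 and S in
   bit 26.  */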
15189 branch_insn |= (branch_offset >> 1) & 0x7ff;
15190 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15191 i2 = (branch_offset >> 22) & 1;
15192 i1 = (branch_offset >> 23) & 1;
15193 s = (branch_offset >> 24) & 1;
15194 j1 = (!i1) ^ s;
15195 j2 = (!i2) ^ s;
15196 branch_insn |= j2 << 11;
15197 branch_insn |= j1 << 13;
15198 branch_insn |= s << 26;
15199 }
15200 break;
15201
15202 default:
15203 BFD_FAIL ();
15204 return FALSE;
15205 }
15206
15207 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15208 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15209
15210 return TRUE;
15211 }
15212
15213 /* Do code byteswapping. Return FALSE afterwards so that the section is
15214 written out as normal. */
15215
15216 static bfd_boolean
15217 elf32_arm_write_section (bfd *output_bfd,
15218 struct bfd_link_info *link_info,
15219 asection *sec,
15220 bfd_byte *contents)
15221 {
15222 unsigned int mapcount, errcount;
15223 _arm_elf_section_data *arm_data;
15224 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15225 elf32_arm_section_map *map;
15226 elf32_vfp11_erratum_list *errnode;
15227 bfd_vma ptr;
15228 bfd_vma end;
15229 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15230 bfd_byte tmp;
15231 unsigned int i;
15232
15233 if (globals == NULL)
15234 return FALSE;
15235
15236 /* If this section has not been allocated an _arm_elf_section_data
15237 structure then we cannot record anything. */
15238 arm_data = get_arm_elf_section_data (sec);
15239 if (arm_data == NULL)
15240 return FALSE;
15241
15242 mapcount = arm_data->mapcount;
15243 map = arm_data->map;
15244 errcount = arm_data->erratumcount;
15245
15246 if (errcount != 0)
15247 {
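/* XORing the byte index with 3 maps a little-endian byte position
   onto the corresponding big-endian position within a (word-aligned)
   32-bit instruction, so the byte-by-byte patching below works for
   either output endianness.  */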
15248 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15249
15250 for (errnode = arm_data->erratumlist; errnode != 0;
15251 errnode = errnode->next)
15252 {
15253 bfd_vma target = errnode->vma - offset;
15254
15255 switch (errnode->type)
15256 {
15257 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15258 {
15259 bfd_vma branch_to_veneer;
15260 /* Original condition code of instruction, plus bit mask for
15261 ARM B instruction. */
15262 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15263 | 0x0a000000;
15264
15265 /* The instruction is before the label. */
15266 target -= 4;
15267
15268 /* Above offset included in -4 below. */
15269 branch_to_veneer = errnode->u.b.veneer->vma
15270 - errnode->vma - 4;
15271
15272 if ((signed) branch_to_veneer < -(1 << 25)
15273 || (signed) branch_to_veneer >= (1 << 25))
15274 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15275 "range"), output_bfd);
15276
15277 insn |= (branch_to_veneer >> 2) & 0xffffff;
15278 contents[endianflip ^ target] = insn & 0xff;
15279 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15280 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15281 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15282 }
15283 break;
15284
15285 case VFP11_ERRATUM_ARM_VENEER:
15286 {
15287 bfd_vma branch_from_veneer;
15288 unsigned int insn;
15289
15290 /* Take size of veneer into account. */
15291 branch_from_veneer = errnode->u.v.branch->vma
15292 - errnode->vma - 12;
15293
15294 if ((signed) branch_from_veneer < -(1 << 25)
15295 || (signed) branch_from_veneer >= (1 << 25))
15296 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15297 "range"), output_bfd);
15298
15299 /* Original instruction. */
15300 insn = errnode->u.v.branch->u.b.vfp_insn;
15301 contents[endianflip ^ target] = insn & 0xff;
15302 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15303 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15304 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15305
15306 /* Branch back to insn after original insn. */
15307 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15308 contents[endianflip ^ (target + 4)] = insn & 0xff;
15309 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15310 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15311 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15312 }
15313 break;
15314
15315 default:
15316 abort ();
15317 }
15318 }
15319 }
15320
15321 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15322 {
15323 arm_unwind_table_edit *edit_node
15324 = arm_data->u.exidx.unwind_edit_list;
15325 /* Now, sec->size is the size of the section we will write. The original
15326 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15327 markers) was sec->rawsize. (If we performed no edits, rawsize will be
15328 zero and we should use size instead.) */
15329 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15330 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15331 unsigned int in_index, out_index;
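/* Running correction applied to the prel31 words of the entries we
   copy: each deleted entry shifts every following entry 8 bytes
   towards the start of the section relative to where its relocations
   were applied, so the applied offsets must grow by 8 to keep
   addressing the same code; each inserted EXIDX_CANTUNWIND entry has
   the opposite effect.  */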
15332 bfd_vma add_to_offsets = 0;
15333
15334 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15335 {
15336 if (edit_node)
15337 {
15338 unsigned int edit_index = edit_node->index;
15339
15340 if (in_index < edit_index && in_index * 8 < input_size)
15341 {
15342 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15343 contents + in_index * 8, add_to_offsets);
15344 out_index++;
15345 in_index++;
15346 }
15347 else if (in_index == edit_index
15348 || (in_index * 8 >= input_size
15349 && edit_index == UINT_MAX))
15350 {
15351 switch (edit_node->type)
15352 {
15353 case DELETE_EXIDX_ENTRY:
15354 in_index++;
15355 add_to_offsets += 8;
15356 break;
15357
15358 case INSERT_EXIDX_CANTUNWIND_AT_END:
15359 {
15360 asection *text_sec = edit_node->linked_section;
15361 bfd_vma text_offset = text_sec->output_section->vma
15362 + text_sec->output_offset
15363 + text_sec->size;
15364 bfd_vma exidx_offset = offset + out_index * 8;
15365 unsigned long prel31_offset;
15366
15367 /* Note: this is meant to be equivalent to an
15368 R_ARM_PREL31 relocation. These synthetic
15369 EXIDX_CANTUNWIND markers are not relocated by the
15370 usual BFD method. */
15371 prel31_offset = (text_offset - exidx_offset)
15372 & 0x7ffffffful;
15373
15374 /* First address we can't unwind. */
15375 bfd_put_32 (output_bfd, prel31_offset,
15376 &edited_contents[out_index * 8]);
15377
15378 /* Code for EXIDX_CANTUNWIND. */
15379 bfd_put_32 (output_bfd, 0x1,
15380 &edited_contents[out_index * 8 + 4]);
15381
15382 out_index++;
15383 add_to_offsets -= 8;
15384 }
15385 break;
15386 }
15387
15388 edit_node = edit_node->next;
15389 }
15390 }
15391 else
15392 {
15393 /* No more edits, copy remaining entries verbatim. */
15394 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15395 contents + in_index * 8, add_to_offsets);
15396 out_index++;
15397 in_index++;
15398 }
15399 }
15400
15401 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15402 bfd_set_section_contents (output_bfd, sec->output_section,
15403 edited_contents,
15404 (file_ptr) sec->output_offset, sec->size);
15405
15406 return TRUE;
15407 }
15408
15409 /* Fix code to point to Cortex-A8 erratum stubs. */
15410 if (globals->fix_cortex_a8)
15411 {
15412 struct a8_branch_to_stub_data data;
15413
15414 data.writing_section = sec;
15415 data.contents = contents;
15416
15417 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15418 &data);
15419 }
15420
15421 if (mapcount == 0)
15422 return FALSE;
15423
15424 if (globals->byteswap_code)
15425 {
15426 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15427
15428 ptr = map[0].vma;
15429 for (i = 0; i < mapcount; i++)
15430 {
15431 if (i == mapcount - 1)
15432 end = sec->size;
15433 else
15434 end = map[i + 1].vma;
15435
15436 switch (map[i].type)
15437 {
15438 case 'a':
15439 /* Byte swap code words. */
15440 while (ptr + 3 < end)
15441 {
15442 tmp = contents[ptr];
15443 contents[ptr] = contents[ptr + 3];
15444 contents[ptr + 3] = tmp;
15445 tmp = contents[ptr + 1];
15446 contents[ptr + 1] = contents[ptr + 2];
15447 contents[ptr + 2] = tmp;
15448 ptr += 4;
15449 }
15450 break;
15451
15452 case 't':
15453 /* Byte swap code halfwords. */
15454 while (ptr + 1 < end)
15455 {
15456 tmp = contents[ptr];
15457 contents[ptr] = contents[ptr + 1];
15458 contents[ptr + 1] = tmp;
15459 ptr += 2;
15460 }
15461 break;
15462
15463 case 'd':
15464 /* Leave data alone. */
15465 break;
15466 }
15467 ptr = end;
15468 }
15469 }
15470
15471 free (map);
15472 arm_data->mapcount = -1;
15473 arm_data->mapsize = 0;
15474 arm_data->map = NULL;
15475
15476 return FALSE;
15477 }
15478
15479 /* Mangle thumb function symbols as we read them in. */
15480
15481 static bfd_boolean
15482 elf32_arm_swap_symbol_in (bfd * abfd,
15483 const void *psrc,
15484 const void *pshn,
15485 Elf_Internal_Sym *dst)
15486 {
15487 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15488 return FALSE;
15489
15490 /* New EABI objects mark thumb function symbols by setting the low bit of
15491 the address. */
15492 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15493 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15494 {
15495 if (dst->st_value & 1)
15496 {
15497 dst->st_value &= ~(bfd_vma) 1;
15498 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15499 }
15500 else
15501 dst->st_target_internal = ST_BRANCH_TO_ARM;
15502 }
15503 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15504 {
15505 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15506 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15507 }
15508 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15509 dst->st_target_internal = ST_BRANCH_LONG;
15510 else
15511 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15512
15513 return TRUE;
15514 }
15515
15516
15517 /* Mangle thumb function symbols as we write them out. */
15518
15519 static void
15520 elf32_arm_swap_symbol_out (bfd *abfd,
15521 const Elf_Internal_Sym *src,
15522 void *cdst,
15523 void *shndx)
15524 {
15525 Elf_Internal_Sym newsym;
15526
15527 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15528 of the address set, as per the new EABI. We do this unconditionally
15529 because objcopy does not set the elf header flags until after
15530 it writes out the symbol table. */
15531 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15532 {
15533 newsym = *src;
15534 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15535 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15536 if (newsym.st_shndx != SHN_UNDEF)
15537 {
15538 /* Do this only for defined symbols. At link time, the static
15539 linker will simulate the work of the dynamic linker in resolving
15540 symbols and will carry over the Thumbness of found symbols to
15541 the output symbol table. It's not clear how it happens, but
15542 the Thumbness of undefined symbols can well be different at
15543 runtime, and writing '1' for them would be confusing for users
15544 and possibly for the dynamic linker itself.
15545 */
15546 newsym.st_value |= 1;
15547 }
15548
15549 src = &newsym;
15550 }
15551 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15552 }
15553
15554 /* Add the PT_ARM_EXIDX program header. */
15555
15556 static bfd_boolean
15557 elf32_arm_modify_segment_map (bfd *abfd,
15558 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15559 {
15560 struct elf_segment_map *m;
15561 asection *sec;
15562
15563 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15564 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15565 {
15566 /* If there is already a PT_ARM_EXIDX header, then we do not
15567 want to add another one. This situation arises when running
15568 "strip"; the input binary already has the header. */
15569 m = elf_tdata (abfd)->segment_map;
15570 while (m && m->p_type != PT_ARM_EXIDX)
15571 m = m->next;
15572 if (!m)
15573 {
15574 m = (struct elf_segment_map *)
15575 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15576 if (m == NULL)
15577 return FALSE;
15578 m->p_type = PT_ARM_EXIDX;
15579 m->count = 1;
15580 m->sections[0] = sec;
15581
15582 m->next = elf_tdata (abfd)->segment_map;
15583 elf_tdata (abfd)->segment_map = m;
15584 }
15585 }
15586
15587 return TRUE;
15588 }
15589
15590 /* We may add a PT_ARM_EXIDX program header. */
15591
15592 static int
15593 elf32_arm_additional_program_headers (bfd *abfd,
15594 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15595 {
15596 asection *sec;
15597
15598 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15599 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15600 return 1;
15601 else
15602 return 0;
15603 }
15604
15605 /* Hook called by the linker routine which adds symbols from an object
15606 file. */
15607
15608 static bfd_boolean
15609 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15610 Elf_Internal_Sym *sym, const char **namep,
15611 flagword *flagsp, asection **secp, bfd_vma *valp)
15612 {
15613 if ((abfd->flags & DYNAMIC) == 0
15614 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15615 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15616 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15617
15618 if (elf32_arm_hash_table (info)->vxworks_p
15619 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15620 flagsp, secp, valp))
15621 return FALSE;
15622
15623 return TRUE;
15624 }
15625
15626 /* We use this to override swap_symbol_in and swap_symbol_out. */
15627 const struct elf_size_info elf32_arm_size_info =
15628 {
15629 sizeof (Elf32_External_Ehdr),
15630 sizeof (Elf32_External_Phdr),
15631 sizeof (Elf32_External_Shdr),
15632 sizeof (Elf32_External_Rel),
15633 sizeof (Elf32_External_Rela),
15634 sizeof (Elf32_External_Sym),
15635 sizeof (Elf32_External_Dyn),
15636 sizeof (Elf_External_Note),
15637 4,
15638 1,
15639 32, 2,
15640 ELFCLASS32, EV_CURRENT,
15641 bfd_elf32_write_out_phdrs,
15642 bfd_elf32_write_shdrs_and_ehdr,
15643 bfd_elf32_checksum_contents,
15644 bfd_elf32_write_relocs,
15645 elf32_arm_swap_symbol_in,
15646 elf32_arm_swap_symbol_out,
15647 bfd_elf32_slurp_reloc_table,
15648 bfd_elf32_slurp_symbol_table,
15649 bfd_elf32_swap_dyn_in,
15650 bfd_elf32_swap_dyn_out,
15651 bfd_elf32_swap_reloc_in,
15652 bfd_elf32_swap_reloc_out,
15653 bfd_elf32_swap_reloca_in,
15654 bfd_elf32_swap_reloca_out
15655 };
15656
15657 #define ELF_ARCH bfd_arch_arm
15658 #define ELF_TARGET_ID ARM_ELF_DATA
15659 #define ELF_MACHINE_CODE EM_ARM
15660 #ifdef __QNXTARGET__
15661 #define ELF_MAXPAGESIZE 0x1000
15662 #else
15663 #define ELF_MAXPAGESIZE 0x8000
15664 #endif
15665 #define ELF_MINPAGESIZE 0x1000
15666 #define ELF_COMMONPAGESIZE 0x1000
15667
15668 #define bfd_elf32_mkobject elf32_arm_mkobject
15669
15670 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15671 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15672 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15673 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15674 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15675 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15676 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15677 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15678 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15679 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15680 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15681 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15682 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15683
15684 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15685 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15686 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15687 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15688 #define elf_backend_check_relocs elf32_arm_check_relocs
15689 #define elf_backend_relocate_section elf32_arm_relocate_section
15690 #define elf_backend_write_section elf32_arm_write_section
15691 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15692 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15693 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15694 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15695 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15696 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15697 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15698 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15699 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15700 #define elf_backend_object_p elf32_arm_object_p
15701 #define elf_backend_fake_sections elf32_arm_fake_sections
15702 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15703 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15704 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15705 #define elf_backend_size_info elf32_arm_size_info
15706 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15707 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15708 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15709 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15710 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15711
15712 #define elf_backend_can_refcount 1
15713 #define elf_backend_can_gc_sections 1
15714 #define elf_backend_plt_readonly 1
15715 #define elf_backend_want_got_plt 1
15716 #define elf_backend_want_plt_sym 0
15717 #define elf_backend_may_use_rel_p 1
15718 #define elf_backend_may_use_rela_p 0
15719 #define elf_backend_default_use_rela_p 0
15720
15721 #define elf_backend_got_header_size 12
15722
15723 #undef elf_backend_obj_attrs_vendor
15724 #define elf_backend_obj_attrs_vendor "aeabi"
15725 #undef elf_backend_obj_attrs_section
15726 #define elf_backend_obj_attrs_section ".ARM.attributes"
15727 #undef elf_backend_obj_attrs_arg_type
15728 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15729 #undef elf_backend_obj_attrs_section_type
15730 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15731 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15732 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15733
15734 #include "elf32-target.h"
15735
15736 /* Native Client targets. */
15737
15738 #undef TARGET_LITTLE_SYM
15739 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15740 #undef TARGET_LITTLE_NAME
15741 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15742 #undef TARGET_BIG_SYM
15743 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15744 #undef TARGET_BIG_NAME
15745 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15746
15747 /* Like elf32_arm_link_hash_table_create -- but overrides
15748 appropriately for NaCl. */
15749
15750 static struct bfd_link_hash_table *
15751 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15752 {
15753 struct bfd_link_hash_table *ret;
15754
15755 ret = elf32_arm_link_hash_table_create (abfd);
15756 if (ret)
15757 {
15758 struct elf32_arm_link_hash_table *htab
15759 = (struct elf32_arm_link_hash_table *) ret;
15760
15761 htab->nacl_p = 1;
15762
15763 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15764 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15765 }
15766 return ret;
15767 }
15768
15769 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15770 really need to use elf32_arm_modify_segment_map. But we do it
15771 anyway just to reduce gratuitous differences with the stock ARM backend. */
15772
15773 static bfd_boolean
15774 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15775 {
15776 return (elf32_arm_modify_segment_map (abfd, info)
15777 && nacl_modify_segment_map (abfd, info));
15778 }
15779
15780 #undef elf32_bed
15781 #define elf32_bed elf32_arm_nacl_bed
15782 #undef bfd_elf32_bfd_link_hash_table_create
15783 #define bfd_elf32_bfd_link_hash_table_create \
15784 elf32_arm_nacl_link_hash_table_create
15785 #undef elf_backend_plt_alignment
15786 #define elf_backend_plt_alignment 4
15787 #undef elf_backend_modify_segment_map
15788 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15789 #undef elf_backend_modify_program_headers
15790 #define elf_backend_modify_program_headers nacl_modify_program_headers
15791
15792 #undef ELF_MAXPAGESIZE
15793 #define ELF_MAXPAGESIZE 0x10000
15794
15795 #include "elf32-target.h"
15796
15797 /* Reset to defaults. */
15798 #undef elf_backend_plt_alignment
15799 #undef elf_backend_modify_segment_map
15800 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15801 #undef elf_backend_modify_program_headers
15802
15803 /* VxWorks Targets. */
15804
15805 #undef TARGET_LITTLE_SYM
15806 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15807 #undef TARGET_LITTLE_NAME
15808 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15809 #undef TARGET_BIG_SYM
15810 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15811 #undef TARGET_BIG_NAME
15812 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15813
15814 /* Like elf32_arm_link_hash_table_create -- but overrides
15815 appropriately for VxWorks. */
15816
15817 static struct bfd_link_hash_table *
15818 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15819 {
15820 struct bfd_link_hash_table *ret;
15821
15822 ret = elf32_arm_link_hash_table_create (abfd);
15823 if (ret)
15824 {
15825 struct elf32_arm_link_hash_table *htab
15826 = (struct elf32_arm_link_hash_table *) ret;
15827 htab->use_rel = 0;
15828 htab->vxworks_p = 1;
15829 }
15830 return ret;
15831 }
15832
15833 static void
15834 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15835 {
15836 elf32_arm_final_write_processing (abfd, linker);
15837 elf_vxworks_final_write_processing (abfd, linker);
15838 }
15839
15840 #undef elf32_bed
15841 #define elf32_bed elf32_arm_vxworks_bed
15842
15843 #undef bfd_elf32_bfd_link_hash_table_create
15844 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15845 #undef elf_backend_final_write_processing
15846 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15847 #undef elf_backend_emit_relocs
15848 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15849
15850 #undef elf_backend_may_use_rel_p
15851 #define elf_backend_may_use_rel_p 0
15852 #undef elf_backend_may_use_rela_p
15853 #define elf_backend_may_use_rela_p 1
15854 #undef elf_backend_default_use_rela_p
15855 #define elf_backend_default_use_rela_p 1
15856 #undef elf_backend_want_plt_sym
15857 #define elf_backend_want_plt_sym 1
15858 #undef ELF_MAXPAGESIZE
15859 #define ELF_MAXPAGESIZE 0x1000
15860
15861 #include "elf32-target.h"
15862
15863
15864 /* Merge backend specific data from an object file to the output
15865 object file when linking. */
15866
15867 static bfd_boolean
15868 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15869 {
15870 flagword out_flags;
15871 flagword in_flags;
15872 bfd_boolean flags_compatible = TRUE;
15873 asection *sec;
15874
15875 /* Check if we have the same endianness. */
15876 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15877 return FALSE;
15878
15879 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
15880 return TRUE;
15881
15882 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15883 return FALSE;
15884
15885 /* The input BFD must have had its flags initialised. */
15886 /* The following seems bogus to me -- The flags are initialized in
15887 the assembler but I don't think an elf_flags_init field is
15888 written into the object. */
15889 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15890
15891 in_flags = elf_elfheader (ibfd)->e_flags;
15892 out_flags = elf_elfheader (obfd)->e_flags;
15893
15894 /* In theory there is no reason why we couldn't handle this. However
15895 in practice it isn't even close to working and there is no real
15896 reason to want it. */
15897 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15898 && !(ibfd->flags & DYNAMIC)
15899 && (in_flags & EF_ARM_BE8))
15900 {
15901 _bfd_error_handler (_("error: %B is already in final BE8 format"),
15902 ibfd);
15903 return FALSE;
15904 }
15905
15906 if (!elf_flags_init (obfd))
15907 {
15908 /* If the input is the default architecture and had the default
15909 flags then do not bother setting the flags for the output
15910 architecture, instead allow future merges to do this. If no
15911 future merges ever set these flags then they will retain their
15912 uninitialised values which, surprise surprise, correspond
15913 to the default values. */
15914 if (bfd_get_arch_info (ibfd)->the_default
15915 && elf_elfheader (ibfd)->e_flags == 0)
15916 return TRUE;
15917
15918 elf_flags_init (obfd) = TRUE;
15919 elf_elfheader (obfd)->e_flags = in_flags;
15920
15921 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15922 && bfd_get_arch_info (obfd)->the_default)
15923 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15924
15925 return TRUE;
15926 }
15927
15928 /* Determine what should happen if the input ARM architecture
15929 does not match the output ARM architecture. */
15930 if (! bfd_arm_merge_machines (ibfd, obfd))
15931 return FALSE;
15932
15933 /* Identical flags must be compatible. */
15934 if (in_flags == out_flags)
15935 return TRUE;
15936
15937 /* Check to see if the input BFD actually contains any sections. If
15938 not, its flags may not have been initialised either, but it
15939 cannot actually cause any incompatibility. Do not short-circuit
15940 dynamic objects; their section list may be emptied by
15941 elf_link_add_object_symbols.
15942
15943 Also check to see if there are no code sections in the input.
15944 In this case there is no need to check for code specific flags.
15945 XXX - do we need to worry about floating-point format compatibility
15946 in data sections? */
15947 if (!(ibfd->flags & DYNAMIC))
15948 {
15949 bfd_boolean null_input_bfd = TRUE;
15950 bfd_boolean only_data_sections = TRUE;
15951
15952 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15953 {
15954 /* Ignore synthetic glue sections. */
15955 if (strcmp (sec->name, ".glue_7")
15956 && strcmp (sec->name, ".glue_7t"))
15957 {
15958 if ((bfd_get_section_flags (ibfd, sec)
15959 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15960 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15961 only_data_sections = FALSE;
15962
15963 null_input_bfd = FALSE;
15964 break;
15965 }
15966 }
15967
15968 if (null_input_bfd || only_data_sections)
15969 return TRUE;
15970 }
15971
15972 /* Complain about various flag mismatches. */
15973 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15974 EF_ARM_EABI_VERSION (out_flags)))
15975 {
15976 _bfd_error_handler
15977 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15978 ibfd, obfd,
15979 (in_flags & EF_ARM_EABIMASK) >> 24,
15980 (out_flags & EF_ARM_EABIMASK) >> 24);
15981 return FALSE;
15982 }
15983
15984 /* Not sure what needs to be checked for EABI versions >= 1. */
15985 /* VxWorks libraries do not use these flags. */
15986 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15987 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15988 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15989 {
15990 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15991 {
15992 _bfd_error_handler
15993 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15994 ibfd, obfd,
15995 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15996 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15997 flags_compatible = FALSE;
15998 }
15999
16000 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
16001 {
16002 if (in_flags & EF_ARM_APCS_FLOAT)
16003 _bfd_error_handler
16004 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
16005 ibfd, obfd);
16006 else
16007 _bfd_error_handler
16008 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
16009 ibfd, obfd);
16010
16011 flags_compatible = FALSE;
16012 }
16013
16014 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
16015 {
16016 if (in_flags & EF_ARM_VFP_FLOAT)
16017 _bfd_error_handler
16018 (_("error: %B uses VFP instructions, whereas %B does not"),
16019 ibfd, obfd);
16020 else
16021 _bfd_error_handler
16022 (_("error: %B uses FPA instructions, whereas %B does not"),
16023 ibfd, obfd);
16024
16025 flags_compatible = FALSE;
16026 }
16027
16028 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
16029 {
16030 if (in_flags & EF_ARM_MAVERICK_FLOAT)
16031 _bfd_error_handler
16032 (_("error: %B uses Maverick instructions, whereas %B does not"),
16033 ibfd, obfd);
16034 else
16035 _bfd_error_handler
16036 (_("error: %B does not use Maverick instructions, whereas %B does"),
16037 ibfd, obfd);
16038
16039 flags_compatible = FALSE;
16040 }
16041
16042 #ifdef EF_ARM_SOFT_FLOAT
16043 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
16044 {
16045 /* We can allow interworking between code that is VFP format
16046 layout, and uses either soft float or integer regs for
16047 passing floating point arguments and results. We already
16048 know that the APCS_FLOAT flags match; similarly for VFP
16049 flags. */
16050 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
16051 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
16052 {
16053 if (in_flags & EF_ARM_SOFT_FLOAT)
16054 _bfd_error_handler
16055 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16056 ibfd, obfd);
16057 else
16058 _bfd_error_handler
16059 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16060 ibfd, obfd);
16061
16062 flags_compatible = FALSE;
16063 }
16064 }
16065 #endif
16066
16067 /* Interworking mismatch is only a warning. */
16068 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16069 {
16070 if (in_flags & EF_ARM_INTERWORK)
16071 {
16072 _bfd_error_handler
16073 (_("Warning: %B supports interworking, whereas %B does not"),
16074 ibfd, obfd);
16075 }
16076 else
16077 {
16078 _bfd_error_handler
16079 (_("Warning: %B does not support interworking, whereas %B does"),
16080 ibfd, obfd);
16081 }
16082 }
16083 }
16084
16085 return flags_compatible;
16086 }
16087
16088
16089 /* Symbian OS Targets. */
16090
16091 #undef TARGET_LITTLE_SYM
16092 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16093 #undef TARGET_LITTLE_NAME
16094 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16095 #undef TARGET_BIG_SYM
16096 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16097 #undef TARGET_BIG_NAME
16098 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16099
16100 /* Like elf32_arm_link_hash_table_create -- but overrides
16101 appropriately for Symbian OS. */
16102
16103 static struct bfd_link_hash_table *
16104 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16105 {
16106 struct bfd_link_hash_table *ret;
16107
16108 ret = elf32_arm_link_hash_table_create (abfd);
16109 if (ret)
16110 {
16111 struct elf32_arm_link_hash_table *htab
16112 = (struct elf32_arm_link_hash_table *)ret;
16113 /* There is no PLT header for Symbian OS. */
16114 htab->plt_header_size = 0;
16115 /* The PLT entries are each one instruction and one word. */
16116 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16117 htab->symbian_p = 1;
16118 /* Symbian uses armv5t or above, so use_blx is always true. */
16119 htab->use_blx = 1;
16120 htab->root.is_relocatable_executable = 1;
16121 }
16122 return ret;
16123 }
16124
16125 static const struct bfd_elf_special_section
16126 elf32_arm_symbian_special_sections[] =
16127 {
16128 /* In a BPABI executable, the dynamic linking sections do not go in
16129 the loadable read-only segment. The post-linker may wish to
16130 refer to these sections, but they are not part of the final
16131 program image. */
16132 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16133 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16134 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16135 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16136 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16137 /* These sections do not need to be writable as the SymbianOS
16138 postlinker will arrange things so that no dynamic relocation is
16139 required. */
16140 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16141 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16142 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16143 { NULL, 0, 0, 0, 0 }
16144 };
16145
16146 static void
16147 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16148 struct bfd_link_info *link_info)
16149 {
16150 /* BPABI objects are never loaded directly by an OS kernel; they are
16151 processed by a postlinker first, into an OS-specific format. If
16152 the D_PAGED bit is set on the file, BFD will align segments on
16153 page boundaries, so that an OS can directly map the file. With
16154 BPABI objects, that just results in wasted space. In addition,
16155 because we clear the D_PAGED bit, map_sections_to_segments will
16156 recognize that the program headers should not be mapped into any
16157 loadable segment. */
16158 abfd->flags &= ~D_PAGED;
16159 elf32_arm_begin_write_processing (abfd, link_info);
16160 }
16161
16162 static bfd_boolean
16163 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16164 struct bfd_link_info *info)
16165 {
16166 struct elf_segment_map *m;
16167 asection *dynsec;
16168
16169 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16170 segment. However, because the .dynamic section is not marked
16171 with SEC_LOAD, the generic ELF code will not create such a
16172 segment. */
16173 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16174 if (dynsec)
16175 {
16176 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
16177 if (m->p_type == PT_DYNAMIC)
16178 break;
16179
16180 if (m == NULL)
16181 {
16182 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16183 m->next = elf_tdata (abfd)->segment_map;
16184 elf_tdata (abfd)->segment_map = m;
16185 }
16186 }
16187
16188 /* Also call the generic arm routine. */
16189 return elf32_arm_modify_segment_map (abfd, info);
16190 }
16191
16192 /* Return address for Ith PLT stub in section PLT, for relocation REL
16193 or (bfd_vma) -1 if it should not be included. */
16194
16195 static bfd_vma
16196 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16197 const arelent *rel ATTRIBUTE_UNUSED)
16198 {
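/* Symbian OS uses no PLT header (plt_header_size is zero above), so
   the Ith entry starts exactly I entry-sizes into the section.  */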
16199 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16200 }
16201
16202
16203 #undef elf32_bed
16204 #define elf32_bed elf32_arm_symbian_bed
16205
16206 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16207 will process them and then discard them. */
16208 #undef ELF_DYNAMIC_SEC_FLAGS
16209 #define ELF_DYNAMIC_SEC_FLAGS \
16210 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16211
16212 #undef elf_backend_emit_relocs
16213
16214 #undef bfd_elf32_bfd_link_hash_table_create
16215 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16216 #undef elf_backend_special_sections
16217 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16218 #undef elf_backend_begin_write_processing
16219 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16220 #undef elf_backend_final_write_processing
16221 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16222
16223 #undef elf_backend_modify_segment_map
16224 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16225
16226 /* There is no .got section for BPABI objects, and hence no header. */
16227 #undef elf_backend_got_header_size
16228 #define elf_backend_got_header_size 0
16229
16230 /* Similarly, there is no .got.plt section. */
16231 #undef elf_backend_want_got_plt
16232 #define elf_backend_want_got_plt 0
16233
16234 #undef elf_backend_plt_sym_val
16235 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16236
16237 #undef elf_backend_may_use_rel_p
16238 #define elf_backend_may_use_rel_p 1
16239 #undef elf_backend_may_use_rela_p
16240 #define elf_backend_may_use_rela_p 0
16241 #undef elf_backend_default_use_rela_p
16242 #define elf_backend_default_use_rela_p 0
16243 #undef elf_backend_want_plt_sym
16244 #define elf_backend_want_plt_sym 0
16245 #undef ELF_MAXPAGESIZE
16246 #define ELF_MAXPAGESIZE 0x8000
16247
16248 #include "elf32-target.h"