[ARM] Fix extern protected data handling
[deliverable/binutils-gdb.git] bfd/elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2015 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
33 /* Return the relocation section associated with NAME. HTAB is the
34    bfd's elf32_arm_link_hash_table. */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
38 /* Return size of a relocation entry. HTAB is the bfd's
39    elf32_arm_link_hash_table. */
40 #define RELOC_SIZE(HTAB) \
41 ((HTAB)->use_rel \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
44
45 /* Return function to swap relocations in. HTAB is the bfd's
46    elf32_arm_link_hash_table. */
47 #define SWAP_RELOC_IN(HTAB) \
48 ((HTAB)->use_rel \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
51
52 /* Return function to swap relocations out. HTAB is the bfd's
53    elf32_arm_link_hash_table. */
54 #define SWAP_RELOC_OUT(HTAB) \
55 ((HTAB)->use_rel \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
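
/* Editorial sketch, not part of the original source: typical use of the
   four macros above.  The names "htab", "output_bfd", "outrel" and "loc"
   are invented for this illustration; use_rel is set for REL-style ARM
   targets and clear for RELA-style ones.  */
#if 0
  const char *dyn_name = RELOC_SECTION (htab, ".dyn");  /* ".rel.dyn" or ".rela.dyn" */
  bfd_size_type entsize = RELOC_SIZE (htab);            /* 8 bytes (REL) or 12 (RELA) */
  SWAP_RELOC_OUT (htab) (output_bfd, &outrel, loc);     /* matching swap-out routine */
#endif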
58
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64
65 /* The Adjusted Place, as defined by AAELF. */
66 #define Pa(X) ((X) & 0xfffffffc)
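/* Editorial note, not in the original source: Pa clears the low two bits,
   rounding down to a 4-byte boundary, e.g. Pa (0x8006) == 0x8004 and
   Pa (0x8003) == 0x8000.  */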
67
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
70 asection *sec,
71 bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
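
/* Editorial note, not in the original source: for R_ARM_PC24 the 24-bit
   field holds a word offset (rightshift 2), so the branch displacement it
   encodes is a signed, 4-byte-aligned offset of roughly +/-32 MB,
   range-checked via complain_overflow_signed.  */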
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 };
1693
1694 /* 160 onwards: */
1695 static reloc_howto_type elf32_arm_howto_table_2[1] =
1696 {
1697 HOWTO (R_ARM_IRELATIVE, /* type */
1698 0, /* rightshift */
1699 2, /* size (0 = byte, 1 = short, 2 = long) */
1700 32, /* bitsize */
1701 FALSE, /* pc_relative */
1702 0, /* bitpos */
1703 complain_overflow_bitfield,/* complain_on_overflow */
1704 bfd_elf_generic_reloc, /* special_function */
1705 "R_ARM_IRELATIVE", /* name */
1706 TRUE, /* partial_inplace */
1707 0xffffffff, /* src_mask */
1708 0xffffffff, /* dst_mask */
1709 FALSE) /* pcrel_offset */
1710 };
1711
1712 /* 249-255 extended, currently unused, relocations: */
1713 static reloc_howto_type elf32_arm_howto_table_3[4] =
1714 {
1715 HOWTO (R_ARM_RREL32, /* type */
1716 0, /* rightshift */
1717 0, /* size (0 = byte, 1 = short, 2 = long) */
1718 0, /* bitsize */
1719 FALSE, /* pc_relative */
1720 0, /* bitpos */
1721 complain_overflow_dont,/* complain_on_overflow */
1722 bfd_elf_generic_reloc, /* special_function */
1723 "R_ARM_RREL32", /* name */
1724 FALSE, /* partial_inplace */
1725 0, /* src_mask */
1726 0, /* dst_mask */
1727 FALSE), /* pcrel_offset */
1728
1729 HOWTO (R_ARM_RABS32, /* type */
1730 0, /* rightshift */
1731 0, /* size (0 = byte, 1 = short, 2 = long) */
1732 0, /* bitsize */
1733 FALSE, /* pc_relative */
1734 0, /* bitpos */
1735 complain_overflow_dont,/* complain_on_overflow */
1736 bfd_elf_generic_reloc, /* special_function */
1737 "R_ARM_RABS32", /* name */
1738 FALSE, /* partial_inplace */
1739 0, /* src_mask */
1740 0, /* dst_mask */
1741 FALSE), /* pcrel_offset */
1742
1743 HOWTO (R_ARM_RPC24, /* type */
1744 0, /* rightshift */
1745 0, /* size (0 = byte, 1 = short, 2 = long) */
1746 0, /* bitsize */
1747 FALSE, /* pc_relative */
1748 0, /* bitpos */
1749 complain_overflow_dont,/* complain_on_overflow */
1750 bfd_elf_generic_reloc, /* special_function */
1751 "R_ARM_RPC24", /* name */
1752 FALSE, /* partial_inplace */
1753 0, /* src_mask */
1754 0, /* dst_mask */
1755 FALSE), /* pcrel_offset */
1756
1757 HOWTO (R_ARM_RBASE, /* type */
1758 0, /* rightshift */
1759 0, /* size (0 = byte, 1 = short, 2 = long) */
1760 0, /* bitsize */
1761 FALSE, /* pc_relative */
1762 0, /* bitpos */
1763 complain_overflow_dont,/* complain_on_overflow */
1764 bfd_elf_generic_reloc, /* special_function */
1765 "R_ARM_RBASE", /* name */
1766 FALSE, /* partial_inplace */
1767 0, /* src_mask */
1768 0, /* dst_mask */
1769 FALSE) /* pcrel_offset */
1770 };
1771
1772 static reloc_howto_type *
1773 elf32_arm_howto_from_type (unsigned int r_type)
1774 {
1775 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1776 return &elf32_arm_howto_table_1[r_type];
1777
1778 if (r_type == R_ARM_IRELATIVE)
1779 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1780
1781 if (r_type >= R_ARM_RREL32
1782 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1783 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1784
1785 return NULL;
1786 }
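/* Note that elf32_arm_howto_from_type returns NULL for any relocation
   number that does not fall inside one of the three tables above; callers
   such as elf32_arm_info_to_howto below store the result directly, so a
   NULL howto simply propagates for unrecognised types.  */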
1787
1788 static void
1789 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1790 Elf_Internal_Rela * elf_reloc)
1791 {
1792 unsigned int r_type;
1793
1794 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1795 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1796 }
1797
1798 struct elf32_arm_reloc_map
1799 {
1800 bfd_reloc_code_real_type bfd_reloc_val;
1801 unsigned char elf_reloc_val;
1802 };
1803
1804 /* All entries in this list must also be present in one of the elf32_arm_howto_table_* arrays above. */
1805 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1806 {
1807 {BFD_RELOC_NONE, R_ARM_NONE},
1808 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1809 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1810 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1811 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1812 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1813 {BFD_RELOC_32, R_ARM_ABS32},
1814 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1815 {BFD_RELOC_8, R_ARM_ABS8},
1816 {BFD_RELOC_16, R_ARM_ABS16},
1817 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1818 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1823 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1824 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1825 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1826 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1827 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1828 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1829 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1830 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1831 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1832 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1833 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1834 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1835 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1836 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1837 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1838 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1839 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1840 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1841 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1842 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1843 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1844 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1845 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1846 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1847 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1848 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1849 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1850 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1851 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1852 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1853 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1854 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1855 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1856 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1857 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1858 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1859 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1860 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1862 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1863 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1864 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1866 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1867 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1868 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1869 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1870 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1871 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1872 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1873 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1874 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1875 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1876 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1877 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1878 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1880 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1881 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1882 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1883 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1884 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1885 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1886 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1887 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1888 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1889 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1890 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1891 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1892 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1893 };
1894
1895 static reloc_howto_type *
1896 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1897 bfd_reloc_code_real_type code)
1898 {
1899 unsigned int i;
1900
1901 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1902 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1903 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1904
1905 return NULL;
1906 }
1907
1908 static reloc_howto_type *
1909 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1910 const char *r_name)
1911 {
1912 unsigned int i;
1913
1914 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1915 if (elf32_arm_howto_table_1[i].name != NULL
1916 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1917 return &elf32_arm_howto_table_1[i];
1918
1919 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1920 if (elf32_arm_howto_table_2[i].name != NULL
1921 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1922 return &elf32_arm_howto_table_2[i];
1923
1924 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1925 if (elf32_arm_howto_table_3[i].name != NULL
1926 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1927 return &elf32_arm_howto_table_3[i];
1928
1929 return NULL;
1930 }
1931
1932 /* Support for core dump NOTE sections. */
1933
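/* The offsets used below follow the Linux/ARM core-file layout: in the
   148-byte NT_PRSTATUS note, pr_cursig lives at offset 12, pr_pid at
   offset 24 and pr_reg at offset 72, pr_reg holding 18 32-bit registers
   (r0-r15, cpsr and orig_r0), hence the 72-byte size.  These figures are
   quoted here for reference only; they mirror the constants hard-coded in
   the functions below rather than any header included by this file.  */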
1934 static bfd_boolean
1935 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1936 {
1937 int offset;
1938 size_t size;
1939
1940 switch (note->descsz)
1941 {
1942 default:
1943 return FALSE;
1944
1945 case 148: /* Linux/ARM 32-bit. */
1946 /* pr_cursig */
1947 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
1948
1949 /* pr_pid */
1950 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
1951
1952 /* pr_reg */
1953 offset = 72;
1954 size = 72;
1955
1956 break;
1957 }
1958
1959 /* Make a ".reg/999" section. */
1960 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1961 size, note->descpos + offset);
1962 }
1963
1964 static bfd_boolean
1965 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1966 {
1967 switch (note->descsz)
1968 {
1969 default:
1970 return FALSE;
1971
1972 case 124: /* Linux/ARM elf_prpsinfo. */
1973 elf_tdata (abfd)->core->pid
1974 = bfd_get_32 (abfd, note->descdata + 12);
1975 elf_tdata (abfd)->core->program
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1977 elf_tdata (abfd)->core->command
1978 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1979 }
1980
1981 /* Note that for some reason at least one implementation tacks a
1982 spurious space onto the end of the args, so strip it off if it
1983 exists. */
1984 {
1985 char *command = elf_tdata (abfd)->core->command;
1986 int n = strlen (command);
1987
1988 if (0 < n && command[n - 1] == ' ')
1989 command[n - 1] = '\0';
1990 }
1991
1992 return TRUE;
1993 }
1994
1995 static char *
1996 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
1997 int note_type, ...)
1998 {
1999 switch (note_type)
2000 {
2001 default:
2002 return NULL;
2003
2004 case NT_PRPSINFO:
2005 {
2006 char data[124];
2007 va_list ap;
2008
2009 va_start (ap, note_type);
2010 memset (data, 0, sizeof (data));
2011 strncpy (data + 28, va_arg (ap, const char *), 16);
2012 strncpy (data + 44, va_arg (ap, const char *), 80);
2013 va_end (ap);
2014
2015 return elfcore_write_note (abfd, buf, bufsiz,
2016 "CORE", note_type, data, sizeof (data));
2017 }
2018
2019 case NT_PRSTATUS:
2020 {
2021 char data[148];
2022 va_list ap;
2023 long pid;
2024 int cursig;
2025 const void *greg;
2026
2027 va_start (ap, note_type);
2028 memset (data, 0, sizeof (data));
2029 pid = va_arg (ap, long);
2030 bfd_put_32 (abfd, pid, data + 24);
2031 cursig = va_arg (ap, int);
2032 bfd_put_16 (abfd, cursig, data + 12);
2033 greg = va_arg (ap, const void *);
2034 memcpy (data + 72, greg, 72);
2035 va_end (ap);
2036
2037 return elfcore_write_note (abfd, buf, bufsiz,
2038 "CORE", note_type, data, sizeof (data));
2039 }
2040 }
2041 }
2042
2043 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2044 #define TARGET_LITTLE_NAME "elf32-littlearm"
2045 #define TARGET_BIG_SYM arm_elf32_be_vec
2046 #define TARGET_BIG_NAME "elf32-bigarm"
2047
2048 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2049 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2050 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2051
2052 typedef unsigned long int insn32;
2053 typedef unsigned short int insn16;
2054
2055 /* In lieu of proper flags, assume all EABIv4 or later objects are
2056 interworkable. */
2057 #define INTERWORK_FLAG(abfd) \
2058 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2059 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2060 || ((abfd)->flags & BFD_LINKER_CREATED))
2061
2062 /* The linker script knows the section names for placement.
2063 The entry_names are used to do simple name mangling on the stubs.
2064 Given a function name and its type, the stub can be found. The
2065 name can be changed. The only requirement is that the %s be present. */
2066 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2067 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2068
2069 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2070 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2071
2072 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2073 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2074
2075 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2076 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2077
2078 #define STUB_ENTRY_NAME "__%s_veneer"
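/* As an illustration (using a hypothetical function name "foo"): ARM->Thumb
   glue for foo would be emitted into the .glue_7 section under the name
   "__foo_from_arm", Thumb->ARM glue into .glue_7t as "__foo_from_thumb",
   and a long-branch veneer for foo would be named "__foo_veneer".  */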
2079
2080 /* The name of the dynamic interpreter. This is put in the .interp
2081 section. */
2082 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2083
2084 static const unsigned long tls_trampoline [] =
2085 {
2086 0xe08e0000, /* add r0, lr, r0 */
2087 0xe5901004, /* ldr r1, [r0,#4] */
2088 0xe12fff11, /* bx r1 */
2089 };
2090
2091 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2092 {
2093 0xe52d2004, /* push {r2} */
2094 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2095 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2096 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2097 0xe081100f, /* 2: add r1, pc */
2098 0xe12fff12, /* bx r2 */
2099 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2100 + dl_tlsdesc_lazy_resolver(GOT) */
2101 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2102 };
2103
2104 #ifdef FOUR_WORD_PLT
2105
2106 /* The first entry in a procedure linkage table looks like
2107 this. It is set up so that any shared library function that is
2108 called before the relocation has been set up calls the dynamic
2109 linker first. */
2110 static const bfd_vma elf32_arm_plt0_entry [] =
2111 {
2112 0xe52de004, /* str lr, [sp, #-4]! */
2113 0xe59fe010, /* ldr lr, [pc, #16] */
2114 0xe08fe00e, /* add lr, pc, lr */
2115 0xe5bef008, /* ldr pc, [lr, #8]! */
2116 };
2117
2118 /* Subsequent entries in a procedure linkage table look like
2119 this. */
2120 static const bfd_vma elf32_arm_plt_entry [] =
2121 {
2122 0xe28fc600, /* add ip, pc, #NN */
2123 0xe28cca00, /* add ip, ip, #NN */
2124 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2125 0x00000000, /* unused */
2126 };
2127
2128 #else /* not FOUR_WORD_PLT */
2129
2130 /* The first entry in a procedure linkage table looks like
2131 this. It is set up so that any shared library function that is
2132 called before the relocation has been set up calls the dynamic
2133 linker first. */
2134 static const bfd_vma elf32_arm_plt0_entry [] =
2135 {
2136 0xe52de004, /* str lr, [sp, #-4]! */
2137 0xe59fe004, /* ldr lr, [pc, #4] */
2138 0xe08fe00e, /* add lr, pc, lr */
2139 0xe5bef008, /* ldr pc, [lr, #8]! */
2140 0x00000000, /* &GOT[0] - . */
2141 };
2142
2143 /* By default subsequent entries in a procedure linkage table look like
2144 this. Offsets that don't fit into 28 bits will cause a link error. */
2145 static const bfd_vma elf32_arm_plt_entry_short [] =
2146 {
2147 0xe28fc600, /* add ip, pc, #0xNN00000 */
2148 0xe28cca00, /* add ip, ip, #0xNN000 */
2149 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2150 };
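/* The 28-bit limit of the short form follows from the immediates above:
   the two ADDs each supply an 8-bit rotated immediate (bits [27:20] and
   [19:12] of the offset) and the LDR supplies a 12-bit offset
   (bits [11:0]), giving 8 + 8 + 12 = 28 bits in total.  */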
2151
2152 /* When explicitly asked, we'll use this "long" entry format
2153 which can cope with arbitrary displacements. */
2154 static const bfd_vma elf32_arm_plt_entry_long [] =
2155 {
2156 0xe28fc200, /* add ip, pc, #0xN0000000 */
2157 0xe28cc600, /* add ip, ip, #0xNN00000 */
2158 0xe28cca00, /* add ip, ip, #0xNN000 */
2159 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2160 };
2161
2162 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2163
2164 #endif /* not FOUR_WORD_PLT */
2165
2166 /* The first entry in a procedure linkage table looks like this.
2167 It is set up so that any shared library function that is called before the
2168 relocation has been set up calls the dynamic linker first. */
2169 static const bfd_vma elf32_thumb2_plt0_entry [] =
2170 {
2171 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2172 an instruction may be encoded in one or two array elements. */
2173 0xf8dfb500, /* push {lr} */
2174 0x44fee008, /* ldr.w lr, [pc, #8] */
2175 /* add lr, pc */
2176 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2177 0x00000000, /* &GOT[0] - . */
2178 };
2179
2180 /* Subsequent entries in a procedure linkage table for a Thumb-only target
2181 look like this. */
2182 static const bfd_vma elf32_thumb2_plt_entry [] =
2183 {
2184 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2185 an instruction may be encoded in one or two array elements. */
2186 0x0c00f240, /* movw ip, #0xNNNN */
2187 0x0c00f2c0, /* movt ip, #0xNNNN */
2188 0xf8dc44fc, /* add ip, pc */
2189 0xbf00f000 /* ldr.w pc, [ip] */
2190 /* nop */
2191 };
2192
2193 /* The format of the first entry in the procedure linkage table
2194 for a VxWorks executable. */
2195 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2196 {
2197 0xe52dc008, /* str ip,[sp,#-8]! */
2198 0xe59fc000, /* ldr ip,[pc] */
2199 0xe59cf008, /* ldr pc,[ip,#8] */
2200 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2201 };
2202
2203 /* The format of subsequent entries in a VxWorks executable. */
2204 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2205 {
2206 0xe59fc000, /* ldr ip,[pc] */
2207 0xe59cf000, /* ldr pc,[ip] */
2208 0x00000000, /* .long @got */
2209 0xe59fc000, /* ldr ip,[pc] */
2210 0xea000000, /* b _PLT */
2211 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2212 };
2213
2214 /* The format of entries in a VxWorks shared library. */
2215 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2216 {
2217 0xe59fc000, /* ldr ip,[pc] */
2218 0xe79cf009, /* ldr pc,[ip,r9] */
2219 0x00000000, /* .long @got */
2220 0xe59fc000, /* ldr ip,[pc] */
2221 0xe599f008, /* ldr pc,[r9,#8] */
2222 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2223 };
2224
2225 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2226 #define PLT_THUMB_STUB_SIZE 4
2227 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2228 {
2229 0x4778, /* bx pc */
2230 0x46c0 /* nop */
2231 };
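/* The "bx pc" above reads the PC as its own address plus 4, so it branches
   (in ARM state) past the 16-bit nop to the word that follows the stub,
   i.e. to the ARM PLT entry the stub is placed in front of; hence
   PLT_THUMB_STUB_SIZE of 4.  */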
2232
2233 /* The entries in a PLT when using a DLL-based target with multiple
2234 address spaces. */
2235 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2236 {
2237 0xe51ff004, /* ldr pc, [pc, #-4] */
2238 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2239 };
2240
2241 /* The first entry in a procedure linkage table looks like
2242 this. It is set up so that any shared library function that is
2243 called before the relocation has been set up calls the dynamic
2244 linker first. */
2245 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2246 {
2247 /* First bundle: */
2248 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2249 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2250 0xe08cc00f, /* add ip, ip, pc */
2251 0xe52dc008, /* str ip, [sp, #-8]! */
2252 /* Second bundle: */
2253 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2254 0xe59cc000, /* ldr ip, [ip] */
2255 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2256 0xe12fff1c, /* bx ip */
2257 /* Third bundle: */
2258 0xe320f000, /* nop */
2259 0xe320f000, /* nop */
2260 0xe320f000, /* nop */
2261 /* .Lplt_tail: */
2262 0xe50dc004, /* str ip, [sp, #-4] */
2263 /* Fourth bundle: */
2264 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2265 0xe59cc000, /* ldr ip, [ip] */
2266 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2267 0xe12fff1c, /* bx ip */
2268 };
2269 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
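/* ARM_NACL_PLT_TAIL_OFFSET is the byte offset of the ".Lplt_tail" store
   above (array index 11, four bytes per word); the "b .Lplt_tail" branch
   in each subsequent PLT entry targets that location.  */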
2270
2271 /* Subsequent entries in a procedure linkage table look like this. */
2272 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2273 {
2274 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2275 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2276 0xe08cc00f, /* add ip, ip, pc */
2277 0xea000000, /* b .Lplt_tail */
2278 };
2279
2280 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2281 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2282 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) - 2 + 4)
2283 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2284 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2285 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2286 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) - 2) + 4)
2287 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
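/* Derivation of the limits above: an ARM B/BL encodes a signed 24-bit word
   offset (hence the shift left by 2) applied to a PC that reads 8 bytes
   past the branch, while the Thumb encodings use half-word-scaled offsets
   applied to a PC that reads 4 bytes past the branch.  */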
2288
2289 enum stub_insn_type
2290 {
2291 THUMB16_TYPE = 1,
2292 THUMB32_TYPE,
2293 ARM_TYPE,
2294 DATA_TYPE
2295 };
2296
2297 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2298 /* A bit of a hack: a Thumb conditional branch, into which the proper
2299 condition is inserted by arm_build_one_stub(). */
2300 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2301 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2302 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2303 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2304 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2305 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2306
2307 typedef struct
2308 {
2309 bfd_vma data;
2310 enum stub_insn_type type;
2311 unsigned int r_type;
2312 int reloc_addend;
2313 } insn_sequence;
2314
2315 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2316 to reach the stub if necessary. */
2317 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2318 {
2319 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2320 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2321 };
2322
2323 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2324 available. */
2325 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2326 {
2327 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2328 ARM_INSN (0xe12fff1c), /* bx ip */
2329 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2330 };
2331
2332 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2333 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2334 {
2335 THUMB16_INSN (0xb401), /* push {r0} */
2336 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2337 THUMB16_INSN (0x4684), /* mov ip, r0 */
2338 THUMB16_INSN (0xbc01), /* pop {r0} */
2339 THUMB16_INSN (0x4760), /* bx ip */
2340 THUMB16_INSN (0xbf00), /* nop */
2341 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2342 };
2343
2344 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2345 allowed. */
2346 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2347 {
2348 THUMB16_INSN (0x4778), /* bx pc */
2349 THUMB16_INSN (0x46c0), /* nop */
2350 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2351 ARM_INSN (0xe12fff1c), /* bx ip */
2352 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2353 };
2354
2355 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2356 available. */
2357 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2358 {
2359 THUMB16_INSN (0x4778), /* bx pc */
2360 THUMB16_INSN (0x46c0), /* nop */
2361 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2362 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2363 };
2364
2365 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2366 one, when the destination is close enough. */
2367 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2368 {
2369 THUMB16_INSN (0x4778), /* bx pc */
2370 THUMB16_INSN (0x46c0), /* nop */
2371 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2372 };
2373
2374 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2375 blx to reach the stub if necessary. */
2376 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2377 {
2378 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2379 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2380 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2381 };
2382
2383 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2384 blx to reach the stub if necessary. We cannot add into the pc;
2385 that is not guaranteed to cause a mode switch (the behaviour
2386 differs between ARMv6 and ARMv7). */
2387 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2388 {
2389 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2390 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2391 ARM_INSN (0xe12fff1c), /* bx ip */
2392 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2393 };
2394
2395 /* V4T ARM -> Thumb long branch stub, PIC. */
2396 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2397 {
2398 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2399 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2400 ARM_INSN (0xe12fff1c), /* bx ip */
2401 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2402 };
2403
2404 /* V4T Thumb -> ARM long branch stub, PIC. */
2405 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2406 {
2407 THUMB16_INSN (0x4778), /* bx pc */
2408 THUMB16_INSN (0x46c0), /* nop */
2409 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2410 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2411 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2412 };
2413
2414 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2415 architectures. */
2416 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2417 {
2418 THUMB16_INSN (0xb401), /* push {r0} */
2419 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2420 THUMB16_INSN (0x46fc), /* mov ip, pc */
2421 THUMB16_INSN (0x4484), /* add ip, r0 */
2422 THUMB16_INSN (0xbc01), /* pop {r0} */
2423 THUMB16_INSN (0x4760), /* bx ip */
2424 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2425 };
2426
2427 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2428 allowed. */
2429 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2430 {
2431 THUMB16_INSN (0x4778), /* bx pc */
2432 THUMB16_INSN (0x46c0), /* nop */
2433 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2434 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2435 ARM_INSN (0xe12fff1c), /* bx ip */
2436 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2437 };
2438
2439 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2440 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2441 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2442 {
2443 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2444 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2445 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2446 };
2447
2448 /* V4T Thumb -> TLS trampoline. Lowest common denominator, which is a
2449 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2450 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2451 {
2452 THUMB16_INSN (0x4778), /* bx pc */
2453 THUMB16_INSN (0x46c0), /* nop */
2454 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2455 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2456 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2457 };
2458
2459 /* NaCl ARM -> ARM long branch stub. */
2460 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2461 {
2462 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2463 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2464 ARM_INSN (0xe12fff1c), /* bx ip */
2465 ARM_INSN (0xe320f000), /* nop */
2466 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2467 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2468 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2469 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2470 };
2471
2472 /* NaCl ARM -> ARM long branch stub, PIC. */
2473 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2474 {
2475 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2476 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2477 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2478 ARM_INSN (0xe12fff1c), /* bx ip */
2479 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2480 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2481 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2482 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2483 };
2484
2485
2486 /* Cortex-A8 erratum-workaround stubs. */
2487
2488 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2489 can't use a conditional branch to reach this stub). */
2490
2491 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2492 {
2493 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2494 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2495 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2496 };
2497
2498 /* Stub used for b.w and bl.w instructions. */
2499
2500 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2501 {
2502 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2503 };
2504
2505 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2506 {
2507 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2508 };
2509
2510 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2511 instruction (which switches to ARM mode) to point to this stub. Jump to the
2512 real destination using an ARM-mode branch. */
2513
2514 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2515 {
2516 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2517 };
2518
2519 /* For each section group there can be a specially created linker section
2520 to hold the stubs for that group. The name of the stub section is based
2521 upon the name of another section within that group with the suffix below
2522 applied.
2523
2524 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2525 create what appeared to be a linker stub section when it actually
2526 contained user code/data. For example, consider this fragment:
2527
2528 const char * stubborn_problems[] = { "np" };
2529
2530 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2531 section called:
2532
2533 .data.rel.local.stubborn_problems
2534
2535 This then causes problems in elf32_arm_build_stubs() as it triggers:
2536
2537 // Ignore non-stub sections.
2538 if (!strstr (stub_sec->name, STUB_SUFFIX))
2539 continue;
2540
2541 And so the section would be ignored instead of being processed. Hence
2542 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2543 C identifier. */
2544 #define STUB_SUFFIX ".__stub"
2545
2546 /* One entry per long/short branch stub defined above. */
2547 #define DEF_STUBS \
2548 DEF_STUB(long_branch_any_any) \
2549 DEF_STUB(long_branch_v4t_arm_thumb) \
2550 DEF_STUB(long_branch_thumb_only) \
2551 DEF_STUB(long_branch_v4t_thumb_thumb) \
2552 DEF_STUB(long_branch_v4t_thumb_arm) \
2553 DEF_STUB(short_branch_v4t_thumb_arm) \
2554 DEF_STUB(long_branch_any_arm_pic) \
2555 DEF_STUB(long_branch_any_thumb_pic) \
2556 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2557 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2558 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2559 DEF_STUB(long_branch_thumb_only_pic) \
2560 DEF_STUB(long_branch_any_tls_pic) \
2561 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2562 DEF_STUB(long_branch_arm_nacl) \
2563 DEF_STUB(long_branch_arm_nacl_pic) \
2564 DEF_STUB(a8_veneer_b_cond) \
2565 DEF_STUB(a8_veneer_b) \
2566 DEF_STUB(a8_veneer_bl) \
2567 DEF_STUB(a8_veneer_blx)
2568
2569 #define DEF_STUB(x) arm_stub_##x,
2570 enum elf32_arm_stub_type
2571 {
2572 arm_stub_none,
2573 DEF_STUBS
2574 /* Note the first a8_veneer type. */
2575 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2576 };
2577 #undef DEF_STUB
2578
2579 typedef struct
2580 {
2581 const insn_sequence* template_sequence;
2582 int template_size;
2583 } stub_def;
2584
2585 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2586 static const stub_def stub_definitions[] =
2587 {
2588 {NULL, 0},
2589 DEF_STUBS
2590 };
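/* DEF_STUBS is expanded twice: once (above) to populate enum
   elf32_arm_stub_type and once to populate stub_definitions, so
   stub_definitions[arm_stub_long_branch_any_any] describes
   elf32_arm_stub_long_branch_any_any, and the leading {NULL, 0} entry
   pairs with arm_stub_none.  */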
2591
2592 struct elf32_arm_stub_hash_entry
2593 {
2594 /* Base hash table entry structure. */
2595 struct bfd_hash_entry root;
2596
2597 /* The stub section. */
2598 asection *stub_sec;
2599
2600 /* Offset within stub_sec of the beginning of this stub. */
2601 bfd_vma stub_offset;
2602
2603 /* Given the symbol's value and its section we can determine its final
2604 value when building the stubs (so the stub knows where to jump). */
2605 bfd_vma target_value;
2606 asection *target_section;
2607
2608 /* Offset to apply to relocation referencing target_value. */
2609 bfd_vma target_addend;
2610
2611 /* The instruction which caused this stub to be generated (only valid for
2612 Cortex-A8 erratum workaround stubs at present). */
2613 unsigned long orig_insn;
2614
2615 /* The stub type. */
2616 enum elf32_arm_stub_type stub_type;
2617 /* Its encoding size in bytes. */
2618 int stub_size;
2619 /* Its template. */
2620 const insn_sequence *stub_template;
2621 /* The size of the template (number of entries). */
2622 int stub_template_size;
2623
2624 /* The symbol table entry, if any, that this was derived from. */
2625 struct elf32_arm_link_hash_entry *h;
2626
2627 /* Type of branch. */
2628 enum arm_st_branch_type branch_type;
2629
2630 /* Where this stub is being called from, or, in the case of combined
2631 stub sections, the first input section in the group. */
2632 asection *id_sec;
2633
2634 /* The name for the local symbol at the start of this stub. The
2635 stub name in the hash table has to be unique; this does not, so
2636 it can be friendlier. */
2637 char *output_name;
2638 };
2639
2640 /* Used to build a map of a section. This is required for mixed-endian
2641 code/data. */
2642
2643 typedef struct elf32_elf_section_map
2644 {
2645 bfd_vma vma;
2646 char type;
2647 }
2648 elf32_arm_section_map;
2649
2650 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2651
2652 typedef enum
2653 {
2654 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2655 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2656 VFP11_ERRATUM_ARM_VENEER,
2657 VFP11_ERRATUM_THUMB_VENEER
2658 }
2659 elf32_vfp11_erratum_type;
2660
2661 typedef struct elf32_vfp11_erratum_list
2662 {
2663 struct elf32_vfp11_erratum_list *next;
2664 bfd_vma vma;
2665 union
2666 {
2667 struct
2668 {
2669 struct elf32_vfp11_erratum_list *veneer;
2670 unsigned int vfp_insn;
2671 } b;
2672 struct
2673 {
2674 struct elf32_vfp11_erratum_list *branch;
2675 unsigned int id;
2676 } v;
2677 } u;
2678 elf32_vfp11_erratum_type type;
2679 }
2680 elf32_vfp11_erratum_list;
2681
2682 typedef enum
2683 {
2684 DELETE_EXIDX_ENTRY,
2685 INSERT_EXIDX_CANTUNWIND_AT_END
2686 }
2687 arm_unwind_edit_type;
2688
2689 /* A (sorted) list of edits to apply to an unwind table. */
2690 typedef struct arm_unwind_table_edit
2691 {
2692 arm_unwind_edit_type type;
2693 /* Note: we sometimes want to insert an unwind entry corresponding to a
2694 section different from the one we're currently writing out, so record the
2695 (text) section this edit relates to here. */
2696 asection *linked_section;
2697 unsigned int index;
2698 struct arm_unwind_table_edit *next;
2699 }
2700 arm_unwind_table_edit;
2701
2702 typedef struct _arm_elf_section_data
2703 {
2704 /* Information about mapping symbols. */
2705 struct bfd_elf_section_data elf;
2706 unsigned int mapcount;
2707 unsigned int mapsize;
2708 elf32_arm_section_map *map;
2709 /* Information about CPU errata. */
2710 unsigned int erratumcount;
2711 elf32_vfp11_erratum_list *erratumlist;
2712 /* Information about unwind tables. */
2713 union
2714 {
2715 /* Unwind info attached to a text section. */
2716 struct
2717 {
2718 asection *arm_exidx_sec;
2719 } text;
2720
2721 /* Unwind info attached to an .ARM.exidx section. */
2722 struct
2723 {
2724 arm_unwind_table_edit *unwind_edit_list;
2725 arm_unwind_table_edit *unwind_edit_tail;
2726 } exidx;
2727 } u;
2728 }
2729 _arm_elf_section_data;
2730
2731 #define elf32_arm_section_data(sec) \
2732 ((_arm_elf_section_data *) elf_section_data (sec))
2733
2734 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2735 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2736 so may be created multiple times: we use an array of these entries whilst
2737 relaxing, which we can refresh easily, then create stubs for each potentially
2738 erratum-triggering instruction once we've settled on a solution. */
2739
2740 struct a8_erratum_fix
2741 {
2742 bfd *input_bfd;
2743 asection *section;
2744 bfd_vma offset;
2745 bfd_vma addend;
2746 unsigned long orig_insn;
2747 char *stub_name;
2748 enum elf32_arm_stub_type stub_type;
2749 enum arm_st_branch_type branch_type;
2750 };
2751
2752 /* A table of relocs applied to branches which might trigger Cortex-A8
2753 erratum. */
2754
2755 struct a8_erratum_reloc
2756 {
2757 bfd_vma from;
2758 bfd_vma destination;
2759 struct elf32_arm_link_hash_entry *hash;
2760 const char *sym_name;
2761 unsigned int r_type;
2762 enum arm_st_branch_type branch_type;
2763 bfd_boolean non_a8_stub;
2764 };
2765
2766 /* The size of the thread control block. */
2767 #define TCB_SIZE 8
2768
2769 /* ARM-specific information about a PLT entry, over and above the usual
2770 gotplt_union. */
2771 struct arm_plt_info
2772 {
2773 /* We reference count Thumb references to a PLT entry separately,
2774 so that we can emit the Thumb trampoline only if needed. */
2775 bfd_signed_vma thumb_refcount;
2776
2777 /* Some references from Thumb code may be eliminated by BL->BLX
2778 conversion, so record them separately. */
2779 bfd_signed_vma maybe_thumb_refcount;
2780
2781 /* How many of the recorded PLT accesses were from non-call relocations.
2782 This information is useful when deciding whether anything takes the
2783 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2784 non-call references to the function should resolve directly to the
2785 real runtime target. */
2786 unsigned int noncall_refcount;
2787
2788 /* Since PLT entries have variable size if the Thumb prologue is
2789 used, we need to record the index into .got.plt instead of
2790 recomputing it from the PLT offset. */
2791 bfd_signed_vma got_offset;
2792 };
2793
2794 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2795 struct arm_local_iplt_info
2796 {
2797 /* The information that is usually found in the generic ELF part of
2798 the hash table entry. */
2799 union gotplt_union root;
2800
2801 /* The information that is usually found in the ARM-specific part of
2802 the hash table entry. */
2803 struct arm_plt_info arm;
2804
2805 /* A list of all potential dynamic relocations against this symbol. */
2806 struct elf_dyn_relocs *dyn_relocs;
2807 };
2808
2809 struct elf_arm_obj_tdata
2810 {
2811 struct elf_obj_tdata root;
2812
2813 /* tls_type for each local got entry. */
2814 char *local_got_tls_type;
2815
2816 /* GOTPLT entries for TLS descriptors. */
2817 bfd_vma *local_tlsdesc_gotent;
2818
2819 /* Information for local symbols that need entries in .iplt. */
2820 struct arm_local_iplt_info **local_iplt;
2821
2822 /* Zero to warn when linking objects with incompatible enum sizes. */
2823 int no_enum_size_warning;
2824
2825 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2826 int no_wchar_size_warning;
2827 };
2828
2829 #define elf_arm_tdata(bfd) \
2830 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2831
2832 #define elf32_arm_local_got_tls_type(bfd) \
2833 (elf_arm_tdata (bfd)->local_got_tls_type)
2834
2835 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2836 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2837
2838 #define elf32_arm_local_iplt(bfd) \
2839 (elf_arm_tdata (bfd)->local_iplt)
2840
2841 #define is_arm_elf(bfd) \
2842 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2843 && elf_tdata (bfd) != NULL \
2844 && elf_object_id (bfd) == ARM_ELF_DATA)
2845
2846 static bfd_boolean
2847 elf32_arm_mkobject (bfd *abfd)
2848 {
2849 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2850 ARM_ELF_DATA);
2851 }
2852
2853 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2854
2855 /* Arm ELF linker hash entry. */
2856 struct elf32_arm_link_hash_entry
2857 {
2858 struct elf_link_hash_entry root;
2859
2860 /* Track dynamic relocs copied for this symbol. */
2861 struct elf_dyn_relocs *dyn_relocs;
2862
2863 /* ARM-specific PLT information. */
2864 struct arm_plt_info plt;
2865
2866 #define GOT_UNKNOWN 0
2867 #define GOT_NORMAL 1
2868 #define GOT_TLS_GD 2
2869 #define GOT_TLS_IE 4
2870 #define GOT_TLS_GDESC 8
2871 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2872 unsigned int tls_type : 8;
2873
2874 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2875 unsigned int is_iplt : 1;
2876
2877 unsigned int unused : 23;
2878
2879 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2880 starting at the end of the jump table. */
2881 bfd_vma tlsdesc_got;
2882
2883 /* The symbol marking the real symbol location for exported thumb
2884 symbols with Arm stubs. */
2885 struct elf_link_hash_entry *export_glue;
2886
2887 /* A pointer to the most recently used stub hash entry against this
2888 symbol. */
2889 struct elf32_arm_stub_hash_entry *stub_cache;
2890 };
2891
2892 /* Traverse an arm ELF linker hash table. */
2893 #define elf32_arm_link_hash_traverse(table, func, info) \
2894 (elf_link_hash_traverse \
2895 (&(table)->root, \
2896 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2897 (info)))
2898
2899 /* Get the ARM elf linker hash table from a link_info structure. */
2900 #define elf32_arm_hash_table(info) \
2901 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2902 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2903
2904 #define arm_stub_hash_lookup(table, string, create, copy) \
2905 ((struct elf32_arm_stub_hash_entry *) \
2906 bfd_hash_lookup ((table), (string), (create), (copy)))
2907
2908 /* Array to keep track of which stub sections have been created, and
2909 information on stub grouping. */
2910 struct map_stub
2911 {
2912 /* This is the section to which stubs in the group will be
2913 attached. */
2914 asection *link_sec;
2915 /* The stub section. */
2916 asection *stub_sec;
2917 };
2918
2919 #define elf32_arm_compute_jump_table_size(htab) \
2920 ((htab)->next_tls_desc_index * 4)
2921
2922 /* ARM ELF linker hash table. */
2923 struct elf32_arm_link_hash_table
2924 {
2925 /* The main hash table. */
2926 struct elf_link_hash_table root;
2927
2928 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2929 bfd_size_type thumb_glue_size;
2930
2931 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2932 bfd_size_type arm_glue_size;
2933
2934 /* The size in bytes of section containing the ARMv4 BX veneers. */
2935 bfd_size_type bx_glue_size;
2936
2937 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2938 veneer has been populated. */
2939 bfd_vma bx_glue_offset[15];
2940
2941 /* The size in bytes of the section containing glue for VFP11 erratum
2942 veneers. */
2943 bfd_size_type vfp11_erratum_glue_size;
2944
2945 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2946 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2947 elf32_arm_write_section(). */
2948 struct a8_erratum_fix *a8_erratum_fixes;
2949 unsigned int num_a8_erratum_fixes;
2950
2951 /* An arbitrary input BFD chosen to hold the glue sections. */
2952 bfd * bfd_of_glue_owner;
2953
2954 /* Nonzero to output a BE8 image. */
2955 int byteswap_code;
2956
2957 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2958 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2959 int target1_is_rel;
2960
2961 /* The relocation to use for R_ARM_TARGET2 relocations. */
2962 int target2_reloc;
2963
2964 /* 0 = Ignore R_ARM_V4BX.
2965 1 = Convert BX to MOV PC.
2966 2 = Generate v4 interworking stubs. */
2967 int fix_v4bx;
2968
2969 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2970 int fix_cortex_a8;
2971
2972 /* Whether we should fix the ARM1176 BLX immediate issue. */
2973 int fix_arm1176;
2974
2975 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2976 int use_blx;
2977
2978 /* What sort of code sequences we should look for which may trigger the
2979 VFP11 denorm erratum. */
2980 bfd_arm_vfp11_fix vfp11_fix;
2981
2982 /* Global counter for the number of fixes we have emitted. */
2983 int num_vfp11_fixes;
2984
2985 /* Nonzero to force PIC branch veneers. */
2986 int pic_veneer;
2987
2988 /* The number of bytes in the initial entry in the PLT. */
2989 bfd_size_type plt_header_size;
2990
2991 /* The number of bytes in the subsequent PLT entries. */
2992 bfd_size_type plt_entry_size;
2993
2994 /* True if the target system is VxWorks. */
2995 int vxworks_p;
2996
2997 /* True if the target system is Symbian OS. */
2998 int symbian_p;
2999
3000 /* True if the target system is Native Client. */
3001 int nacl_p;
3002
3003 /* True if the target uses REL relocations. */
3004 int use_rel;
3005
3006 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3007 bfd_vma next_tls_desc_index;
3008
3009 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3010 bfd_vma num_tls_desc;
3011
3012 /* Short-cuts to get to dynamic linker sections. */
3013 asection *sdynbss;
3014 asection *srelbss;
3015
3016 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3017 asection *srelplt2;
3018
3019 /* The offset into splt of the PLT entry for the TLS descriptor
3020 resolver. Special values are 0, if not necessary (or not found
3021 to be necessary yet), and -1 if needed but not determined
3022 yet. */
3023 bfd_vma dt_tlsdesc_plt;
3024
3025 /* The offset into sgot of the GOT entry used by the PLT entry
3026 above. */
3027 bfd_vma dt_tlsdesc_got;
3028
3029 /* Offset in .plt section of tls_arm_trampoline. */
3030 bfd_vma tls_trampoline;
3031
3032 /* Data for R_ARM_TLS_LDM32 relocations. */
3033 union
3034 {
3035 bfd_signed_vma refcount;
3036 bfd_vma offset;
3037 } tls_ldm_got;
3038
3039 /* Small local sym cache. */
3040 struct sym_cache sym_cache;
3041
3042 /* For convenience in allocate_dynrelocs. */
3043 bfd * obfd;
3044
3045 /* The amount of space used by the reserved portion of the sgotplt
3046 section, plus whatever space is used by the jump slots. */
3047 bfd_vma sgotplt_jump_table_size;
3048
3049 /* The stub hash table. */
3050 struct bfd_hash_table stub_hash_table;
3051
3052 /* Linker stub bfd. */
3053 bfd *stub_bfd;
3054
3055 /* Linker call-backs. */
3056 asection * (*add_stub_section) (const char *, asection *, unsigned int);
3057 void (*layout_sections_again) (void);
3058
3059 /* Array to keep track of which stub sections have been created, and
3060 information on stub grouping. */
3061 struct map_stub *stub_group;
3062
3063 /* Number of elements in stub_group. */
3064 int top_id;
3065
3066 /* Assorted information used by elf32_arm_size_stubs. */
3067 unsigned int bfd_count;
3068 int top_index;
3069 asection **input_list;
3070 };
3071
3072 /* Create an entry in an ARM ELF linker hash table. */
3073
3074 static struct bfd_hash_entry *
3075 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3076 struct bfd_hash_table * table,
3077 const char * string)
3078 {
3079 struct elf32_arm_link_hash_entry * ret =
3080 (struct elf32_arm_link_hash_entry *) entry;
3081
3082 /* Allocate the structure if it has not already been allocated by a
3083 subclass. */
3084 if (ret == NULL)
3085 ret = (struct elf32_arm_link_hash_entry *)
3086 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3087 if (ret == NULL)
3088 return (struct bfd_hash_entry *) ret;
3089
3090 /* Call the allocation method of the superclass. */
3091 ret = ((struct elf32_arm_link_hash_entry *)
3092 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3093 table, string));
3094 if (ret != NULL)
3095 {
3096 ret->dyn_relocs = NULL;
3097 ret->tls_type = GOT_UNKNOWN;
3098 ret->tlsdesc_got = (bfd_vma) -1;
3099 ret->plt.thumb_refcount = 0;
3100 ret->plt.maybe_thumb_refcount = 0;
3101 ret->plt.noncall_refcount = 0;
3102 ret->plt.got_offset = -1;
3103 ret->is_iplt = FALSE;
3104 ret->export_glue = NULL;
3105
3106 ret->stub_cache = NULL;
3107 }
3108
3109 return (struct bfd_hash_entry *) ret;
3110 }
3111
3112 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3113 symbols. */
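/* The bookkeeping is carved out of a single bfd_zalloc'd block, laid out
   as: one bfd_signed_vma GOT refcount per symbol, then one
   arm_local_iplt_info pointer per symbol, then one tlsdesc GOT offset per
   symbol, and finally one tls_type byte per symbol.  The elf32_arm_local_*
   accessor macros defined earlier point at the corresponding slices.  */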
3114
3115 static bfd_boolean
3116 elf32_arm_allocate_local_sym_info (bfd *abfd)
3117 {
3118 if (elf_local_got_refcounts (abfd) == NULL)
3119 {
3120 bfd_size_type num_syms;
3121 bfd_size_type size;
3122 char *data;
3123
3124 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3125 size = num_syms * (sizeof (bfd_signed_vma)
3126 + sizeof (struct arm_local_iplt_info *)
3127 + sizeof (bfd_vma)
3128 + sizeof (char));
3129 data = bfd_zalloc (abfd, size);
3130 if (data == NULL)
3131 return FALSE;
3132
3133 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3134 data += num_syms * sizeof (bfd_signed_vma);
3135
3136 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3137 data += num_syms * sizeof (struct arm_local_iplt_info *);
3138
3139 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3140 data += num_syms * sizeof (bfd_vma);
3141
3142 elf32_arm_local_got_tls_type (abfd) = data;
3143 }
3144 return TRUE;
3145 }
3146
3147 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3148 to input bfd ABFD. Create the information if it doesn't already exist.
3149 Return null if an allocation fails. */
3150
3151 static struct arm_local_iplt_info *
3152 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3153 {
3154 struct arm_local_iplt_info **ptr;
3155
3156 if (!elf32_arm_allocate_local_sym_info (abfd))
3157 return NULL;
3158
3159 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3160 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3161 if (*ptr == NULL)
3162 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3163 return *ptr;
3164 }
3165
3166 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3167 in ABFD's symbol table. If the symbol is global, H points to its
3168 hash table entry, otherwise H is null.
3169
3170 Return true if the symbol does have PLT information. When returning
3171 true, point *ROOT_PLT at the target-independent reference count/offset
3172 union and *ARM_PLT at the ARM-specific information. */
3173
3174 static bfd_boolean
3175 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3176 unsigned long r_symndx, union gotplt_union **root_plt,
3177 struct arm_plt_info **arm_plt)
3178 {
3179 struct arm_local_iplt_info *local_iplt;
3180
3181 if (h != NULL)
3182 {
3183 *root_plt = &h->root.plt;
3184 *arm_plt = &h->plt;
3185 return TRUE;
3186 }
3187
3188 if (elf32_arm_local_iplt (abfd) == NULL)
3189 return FALSE;
3190
3191 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3192 if (local_iplt == NULL)
3193 return FALSE;
3194
3195 *root_plt = &local_iplt->root;
3196 *arm_plt = &local_iplt->arm;
3197 return TRUE;
3198 }
3199
3200 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3201 before it. */
3202
3203 static bfd_boolean
3204 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3205 struct arm_plt_info *arm_plt)
3206 {
3207 struct elf32_arm_link_hash_table *htab;
3208
3209 htab = elf32_arm_hash_table (info);
3210 return (arm_plt->thumb_refcount != 0
3211 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3212 }
3213
3214 /* Return a pointer to the head of the dynamic reloc list that should
3215 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3216 ABFD's symbol table. Return null if an error occurs. */
3217
3218 static struct elf_dyn_relocs **
3219 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3220 Elf_Internal_Sym *isym)
3221 {
3222 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3223 {
3224 struct arm_local_iplt_info *local_iplt;
3225
3226 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3227 if (local_iplt == NULL)
3228 return NULL;
3229 return &local_iplt->dyn_relocs;
3230 }
3231 else
3232 {
3233 /* Track dynamic relocs needed for local syms too.
3234 We really need local syms available to do this
3235 easily. Oh well. */
3236 asection *s;
3237 void *vpp;
3238
3239 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3240 if (s == NULL)
3241 abort ();
3242
3243 vpp = &elf_section_data (s)->local_dynrel;
3244 return (struct elf_dyn_relocs **) vpp;
3245 }
3246 }
3247
3248 /* Initialize an entry in the stub hash table. */
3249
3250 static struct bfd_hash_entry *
3251 stub_hash_newfunc (struct bfd_hash_entry *entry,
3252 struct bfd_hash_table *table,
3253 const char *string)
3254 {
3255 /* Allocate the structure if it has not already been allocated by a
3256 subclass. */
3257 if (entry == NULL)
3258 {
3259 entry = (struct bfd_hash_entry *)
3260 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3261 if (entry == NULL)
3262 return entry;
3263 }
3264
3265 /* Call the allocation method of the superclass. */
3266 entry = bfd_hash_newfunc (entry, table, string);
3267 if (entry != NULL)
3268 {
3269 struct elf32_arm_stub_hash_entry *eh;
3270
3271 /* Initialize the local fields. */
3272 eh = (struct elf32_arm_stub_hash_entry *) entry;
3273 eh->stub_sec = NULL;
3274 eh->stub_offset = 0;
3275 eh->target_value = 0;
3276 eh->target_section = NULL;
3277 eh->target_addend = 0;
3278 eh->orig_insn = 0;
3279 eh->stub_type = arm_stub_none;
3280 eh->stub_size = 0;
3281 eh->stub_template = NULL;
3282 eh->stub_template_size = 0;
3283 eh->h = NULL;
3284 eh->id_sec = NULL;
3285 eh->output_name = NULL;
3286 }
3287
3288 return entry;
3289 }
3290
3291 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3292 shortcuts to them in our hash table. */
3293
3294 static bfd_boolean
3295 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3296 {
3297 struct elf32_arm_link_hash_table *htab;
3298
3299 htab = elf32_arm_hash_table (info);
3300 if (htab == NULL)
3301 return FALSE;
3302
3303 /* BPABI objects never have a GOT, or associated sections. */
3304 if (htab->symbian_p)
3305 return TRUE;
3306
3307 if (! _bfd_elf_create_got_section (dynobj, info))
3308 return FALSE;
3309
3310 return TRUE;
3311 }
3312
3313 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3314
3315 static bfd_boolean
3316 create_ifunc_sections (struct bfd_link_info *info)
3317 {
3318 struct elf32_arm_link_hash_table *htab;
3319 const struct elf_backend_data *bed;
3320 bfd *dynobj;
3321 asection *s;
3322 flagword flags;
3323
3324 htab = elf32_arm_hash_table (info);
3325 dynobj = htab->root.dynobj;
3326 bed = get_elf_backend_data (dynobj);
3327 flags = bed->dynamic_sec_flags;
3328
3329 if (htab->root.iplt == NULL)
3330 {
3331 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3332 flags | SEC_READONLY | SEC_CODE);
3333 if (s == NULL
3334 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3335 return FALSE;
3336 htab->root.iplt = s;
3337 }
3338
3339 if (htab->root.irelplt == NULL)
3340 {
3341 s = bfd_make_section_anyway_with_flags (dynobj,
3342 RELOC_SECTION (htab, ".iplt"),
3343 flags | SEC_READONLY);
3344 if (s == NULL
3345 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3346 return FALSE;
3347 htab->root.irelplt = s;
3348 }
3349
3350 if (htab->root.igotplt == NULL)
3351 {
3352 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3353 if (s == NULL
3354 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3355 return FALSE;
3356 htab->root.igotplt = s;
3357 }
3358 return TRUE;
3359 }
3360
3361 /* Determine if we're dealing with a Thumb-only architecture. */
3362
3363 static bfd_boolean
3364 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3365 {
3366 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3367 Tag_CPU_arch);
3368 int profile;
3369
3370 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
3371 return TRUE;
3372
3373 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3374 return FALSE;
3375
3376 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3377 Tag_CPU_arch_profile);
3378
3379 return profile == 'M';
3380 }
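/* For example, an object tagged Tag_CPU_arch = v7 is only treated as
   Thumb-only when its Tag_CPU_arch_profile is 'M' (the Cortex-M class of
   cores), whereas v6-M and v6S-M objects are Thumb-only unconditionally.  */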
3381
3382 /* Determine if we're dealing with a Thumb-2 object. */
3383
3384 static bfd_boolean
3385 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3386 {
3387 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3388 Tag_CPU_arch);
3389 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3390 }
3391
3392 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3393 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3394 hash table. */
3395
3396 static bfd_boolean
3397 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3398 {
3399 struct elf32_arm_link_hash_table *htab;
3400
3401 htab = elf32_arm_hash_table (info);
3402 if (htab == NULL)
3403 return FALSE;
3404
3405 if (!htab->root.sgot && !create_got_section (dynobj, info))
3406 return FALSE;
3407
3408 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3409 return FALSE;
3410
3411 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3412 if (!info->shared)
3413 htab->srelbss = bfd_get_linker_section (dynobj,
3414 RELOC_SECTION (htab, ".bss"));
3415
3416 if (htab->vxworks_p)
3417 {
3418 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3419 return FALSE;
3420
3421 if (info->shared)
3422 {
3423 htab->plt_header_size = 0;
3424 htab->plt_entry_size
3425 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3426 }
3427 else
3428 {
3429 htab->plt_header_size
3430 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3431 htab->plt_entry_size
3432 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3433 }
3434 }
3435 else
3436 {
3437 /* PR ld/16017
3438 Test for Thumb-only architectures. Note - we cannot simply call
3439 using_thumb_only() on the output bfd, as its attributes have not been
3440 initialised at this point, so we temporarily use the input bfd instead. */
3441 bfd * saved_obfd = htab->obfd;
3442
3443 htab->obfd = dynobj;
3444 if (using_thumb_only (htab))
3445 {
3446 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3447 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3448 }
3449 htab->obfd = saved_obfd;
3450 }
3451
3452 if (!htab->root.splt
3453 || !htab->root.srelplt
3454 || !htab->sdynbss
3455 || (!info->shared && !htab->srelbss))
3456 abort ();
3457
3458 return TRUE;
3459 }
3460
3461 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3462
3463 static void
3464 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3465 struct elf_link_hash_entry *dir,
3466 struct elf_link_hash_entry *ind)
3467 {
3468 struct elf32_arm_link_hash_entry *edir, *eind;
3469
3470 edir = (struct elf32_arm_link_hash_entry *) dir;
3471 eind = (struct elf32_arm_link_hash_entry *) ind;
3472
3473 if (eind->dyn_relocs != NULL)
3474 {
3475 if (edir->dyn_relocs != NULL)
3476 {
3477 struct elf_dyn_relocs **pp;
3478 struct elf_dyn_relocs *p;
3479
3480 /* Add reloc counts against the indirect sym to the direct sym
3481 list. Merge any entries against the same section. */
3482 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3483 {
3484 struct elf_dyn_relocs *q;
3485
3486 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3487 if (q->sec == p->sec)
3488 {
3489 q->pc_count += p->pc_count;
3490 q->count += p->count;
3491 *pp = p->next;
3492 break;
3493 }
3494 if (q == NULL)
3495 pp = &p->next;
3496 }
3497 *pp = edir->dyn_relocs;
3498 }
3499
3500 edir->dyn_relocs = eind->dyn_relocs;
3501 eind->dyn_relocs = NULL;
3502 }
3503
3504 if (ind->root.type == bfd_link_hash_indirect)
3505 {
3506 /* Copy over PLT info. */
3507 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3508 eind->plt.thumb_refcount = 0;
3509 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3510 eind->plt.maybe_thumb_refcount = 0;
3511 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3512 eind->plt.noncall_refcount = 0;
3513
3514 /* We should only allocate a function to .iplt once the final
3515 symbol information is known. */
3516 BFD_ASSERT (!eind->is_iplt);
3517
3518 if (dir->got.refcount <= 0)
3519 {
3520 edir->tls_type = eind->tls_type;
3521 eind->tls_type = GOT_UNKNOWN;
3522 }
3523 }
3524
3525 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3526 }
3527
3528 /* Destroy an ARM elf linker hash table. */
3529
3530 static void
3531 elf32_arm_link_hash_table_free (bfd *obfd)
3532 {
3533 struct elf32_arm_link_hash_table *ret
3534 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3535
3536 bfd_hash_table_free (&ret->stub_hash_table);
3537 _bfd_elf_link_hash_table_free (obfd);
3538 }
3539
3540 /* Create an ARM elf linker hash table. */
3541
3542 static struct bfd_link_hash_table *
3543 elf32_arm_link_hash_table_create (bfd *abfd)
3544 {
3545 struct elf32_arm_link_hash_table *ret;
3546 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3547
3548 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3549 if (ret == NULL)
3550 return NULL;
3551
3552 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3553 elf32_arm_link_hash_newfunc,
3554 sizeof (struct elf32_arm_link_hash_entry),
3555 ARM_ELF_DATA))
3556 {
3557 free (ret);
3558 return NULL;
3559 }
3560
3561 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
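  /* Default sizes (in bytes) of the ARM PLT header and of each PLT entry.
     These are provisional; elf32_arm_create_dynamic_sections may later
     switch to the VxWorks or Thumb-2 PLT layouts instead.  */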
3562 #ifdef FOUR_WORD_PLT
3563 ret->plt_header_size = 16;
3564 ret->plt_entry_size = 16;
3565 #else
3566 ret->plt_header_size = 20;
3567 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3568 #endif
3569 ret->use_rel = 1;
3570 ret->obfd = abfd;
3571
3572 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3573 sizeof (struct elf32_arm_stub_hash_entry)))
3574 {
3575 _bfd_elf_link_hash_table_free (abfd);
3576 return NULL;
3577 }
3578 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3579
3580 return &ret->root.root;
3581 }
3582
3583 /* Determine what kind of NOPs are available. */
3584
3585 static bfd_boolean
3586 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3587 {
3588 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3589 Tag_CPU_arch);
3590 return arch == TAG_CPU_ARCH_V6T2
3591 || arch == TAG_CPU_ARCH_V6K
3592 || arch == TAG_CPU_ARCH_V7
3593 || arch == TAG_CPU_ARCH_V7E_M;
3594 }
3595
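/* Likewise, but for the Thumb-2 NOP: only v6T2, v7 and v7E-M provide it.  */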
3596 static bfd_boolean
3597 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3598 {
3599 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3600 Tag_CPU_arch);
3601 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3602 || arch == TAG_CPU_ARCH_V7E_M);
3603 }
3604
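/* Return TRUE if the stub described by STUB_TYPE is entered in Thumb mode,
   i.e. its first instructions are Thumb rather than ARM.  */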
3605 static bfd_boolean
3606 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3607 {
3608 switch (stub_type)
3609 {
3610 case arm_stub_long_branch_thumb_only:
3611 case arm_stub_long_branch_v4t_thumb_arm:
3612 case arm_stub_short_branch_v4t_thumb_arm:
3613 case arm_stub_long_branch_v4t_thumb_arm_pic:
3614 case arm_stub_long_branch_v4t_thumb_tls_pic:
3615 case arm_stub_long_branch_thumb_only_pic:
3616 return TRUE;
3617 case arm_stub_none:
3618 BFD_FAIL ();
3619 return FALSE;
3620 break;
3621 default:
3622 return FALSE;
3623 }
3624 }
3625
3626 /* Determine the type of stub needed, if any, for a call. */
3627
3628 static enum elf32_arm_stub_type
3629 arm_type_of_stub (struct bfd_link_info *info,
3630 asection *input_sec,
3631 const Elf_Internal_Rela *rel,
3632 unsigned char st_type,
3633 enum arm_st_branch_type *actual_branch_type,
3634 struct elf32_arm_link_hash_entry *hash,
3635 bfd_vma destination,
3636 asection *sym_sec,
3637 bfd *input_bfd,
3638 const char *name)
3639 {
3640 bfd_vma location;
3641 bfd_signed_vma branch_offset;
3642 unsigned int r_type;
3643 struct elf32_arm_link_hash_table * globals;
3644 int thumb2;
3645 int thumb_only;
3646 enum elf32_arm_stub_type stub_type = arm_stub_none;
3647 int use_plt = 0;
3648 enum arm_st_branch_type branch_type = *actual_branch_type;
3649 union gotplt_union *root_plt;
3650 struct arm_plt_info *arm_plt;
3651
3652 if (branch_type == ST_BRANCH_LONG)
3653 return stub_type;
3654
3655 globals = elf32_arm_hash_table (info);
3656 if (globals == NULL)
3657 return stub_type;
3658
3659 thumb_only = using_thumb_only (globals);
3660
3661 thumb2 = using_thumb2 (globals);
3662
3663 /* Determine where the call point is. */
3664 location = (input_sec->output_offset
3665 + input_sec->output_section->vma
3666 + rel->r_offset);
3667
3668 r_type = ELF32_R_TYPE (rel->r_info);
3669
 3670   /* ST_BRANCH_TO_ARM makes no sense for Thumb-only targets when we
 3671      are considering a function call relocation.  */
3672 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3673 || r_type == R_ARM_THM_JUMP19)
3674 && branch_type == ST_BRANCH_TO_ARM)
3675 branch_type = ST_BRANCH_TO_THUMB;
3676
3677 /* For TLS call relocs, it is the caller's responsibility to provide
3678 the address of the appropriate trampoline. */
3679 if (r_type != R_ARM_TLS_CALL
3680 && r_type != R_ARM_THM_TLS_CALL
3681 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3682 &root_plt, &arm_plt)
3683 && root_plt->offset != (bfd_vma) -1)
3684 {
3685 asection *splt;
3686
3687 if (hash == NULL || hash->is_iplt)
3688 splt = globals->root.iplt;
3689 else
3690 splt = globals->root.splt;
3691 if (splt != NULL)
3692 {
3693 use_plt = 1;
3694
 3695          /* Note when dealing with PLT entries: the main PLT stub is in
 3696             ARM mode, so if the branch is in Thumb mode, another
 3697             Thumb->ARM stub will be inserted later just before the ARM
 3698             PLT stub.  We don't take this extra distance into account
 3699             here: if a long branch stub is needed, we'll add a
 3700             Thumb->ARM one that branches directly to the ARM PLT entry,
 3701             which avoids spreading offset corrections over several
 3702             places.  */
3703
3704 destination = (splt->output_section->vma
3705 + splt->output_offset
3706 + root_plt->offset);
3707 st_type = STT_FUNC;
3708 branch_type = ST_BRANCH_TO_ARM;
3709 }
3710 }
3711 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3712 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3713
3714 branch_offset = (bfd_signed_vma)(destination - location);
3715
3716 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3717 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3718 {
3719 /* Handle cases where:
3720 - this call goes too far (different Thumb/Thumb2 max
3721 distance)
3722 - it's a Thumb->Arm call and blx is not available, or it's a
3723 Thumb->Arm branch (not bl). A stub is needed in this case,
3724 but only if this call is not through a PLT entry. Indeed,
3725 PLT stubs handle mode switching already.
3726 */
3727 if ((!thumb2
3728 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3729 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3730 || (thumb2
3731 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3732 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3733 || (thumb2
3734 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3735 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3736 && (r_type == R_ARM_THM_JUMP19))
3737 || (branch_type == ST_BRANCH_TO_ARM
3738 && (((r_type == R_ARM_THM_CALL
3739 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3740 || (r_type == R_ARM_THM_JUMP24)
3741 || (r_type == R_ARM_THM_JUMP19))
3742 && !use_plt))
3743 {
3744 if (branch_type == ST_BRANCH_TO_THUMB)
3745 {
3746 /* Thumb to thumb. */
3747 if (!thumb_only)
3748 {
3749 stub_type = (info->shared | globals->pic_veneer)
3750 /* PIC stubs. */
3751 ? ((globals->use_blx
3752 && (r_type == R_ARM_THM_CALL))
3753 /* V5T and above. Stub starts with ARM code, so
3754 we must be able to switch mode before
3755 reaching it, which is only possible for 'bl'
3756 (ie R_ARM_THM_CALL relocation). */
3757 ? arm_stub_long_branch_any_thumb_pic
3758 /* On V4T, use Thumb code only. */
3759 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3760
3761 /* non-PIC stubs. */
3762 : ((globals->use_blx
3763 && (r_type == R_ARM_THM_CALL))
3764 /* V5T and above. */
3765 ? arm_stub_long_branch_any_any
3766 /* V4T. */
3767 : arm_stub_long_branch_v4t_thumb_thumb);
3768 }
3769 else
3770 {
3771 stub_type = (info->shared | globals->pic_veneer)
3772 /* PIC stub. */
3773 ? arm_stub_long_branch_thumb_only_pic
3774 /* non-PIC stub. */
3775 : arm_stub_long_branch_thumb_only;
3776 }
3777 }
3778 else
3779 {
3780 /* Thumb to arm. */
3781 if (sym_sec != NULL
3782 && sym_sec->owner != NULL
3783 && !INTERWORK_FLAG (sym_sec->owner))
3784 {
3785 (*_bfd_error_handler)
3786 (_("%B(%s): warning: interworking not enabled.\n"
3787 " first occurrence: %B: Thumb call to ARM"),
3788 sym_sec->owner, input_bfd, name);
3789 }
3790
3791 stub_type =
3792 (info->shared | globals->pic_veneer)
3793 /* PIC stubs. */
3794 ? (r_type == R_ARM_THM_TLS_CALL
3795 /* TLS PIC stubs. */
3796 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3797 : arm_stub_long_branch_v4t_thumb_tls_pic)
3798 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3799 /* V5T PIC and above. */
3800 ? arm_stub_long_branch_any_arm_pic
3801 /* V4T PIC stub. */
3802 : arm_stub_long_branch_v4t_thumb_arm_pic))
3803
3804 /* non-PIC stubs. */
3805 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3806 /* V5T and above. */
3807 ? arm_stub_long_branch_any_any
3808 /* V4T. */
3809 : arm_stub_long_branch_v4t_thumb_arm);
3810
3811 /* Handle v4t short branches. */
3812 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3813 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3814 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3815 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3816 }
3817 }
3818 }
3819 else if (r_type == R_ARM_CALL
3820 || r_type == R_ARM_JUMP24
3821 || r_type == R_ARM_PLT32
3822 || r_type == R_ARM_TLS_CALL)
3823 {
3824 if (branch_type == ST_BRANCH_TO_THUMB)
3825 {
3826 /* Arm to thumb. */
3827
3828 if (sym_sec != NULL
3829 && sym_sec->owner != NULL
3830 && !INTERWORK_FLAG (sym_sec->owner))
3831 {
3832 (*_bfd_error_handler)
3833 (_("%B(%s): warning: interworking not enabled.\n"
3834 " first occurrence: %B: ARM call to Thumb"),
3835 sym_sec->owner, input_bfd, name);
3836 }
3837
 3838          /* We have an extra 2 bytes of reach because of
 3839             the mode change (bit 24 (H) of the BLX encoding).  */
3840 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3841 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3842 || (r_type == R_ARM_CALL && !globals->use_blx)
3843 || (r_type == R_ARM_JUMP24)
3844 || (r_type == R_ARM_PLT32))
3845 {
3846 stub_type = (info->shared | globals->pic_veneer)
3847 /* PIC stubs. */
3848 ? ((globals->use_blx)
3849 /* V5T and above. */
3850 ? arm_stub_long_branch_any_thumb_pic
3851 /* V4T stub. */
3852 : arm_stub_long_branch_v4t_arm_thumb_pic)
3853
3854 /* non-PIC stubs. */
3855 : ((globals->use_blx)
3856 /* V5T and above. */
3857 ? arm_stub_long_branch_any_any
3858 /* V4T. */
3859 : arm_stub_long_branch_v4t_arm_thumb);
3860 }
3861 }
3862 else
3863 {
3864 /* Arm to arm. */
3865 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3866 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3867 {
3868 stub_type =
3869 (info->shared | globals->pic_veneer)
3870 /* PIC stubs. */
3871 ? (r_type == R_ARM_TLS_CALL
3872 /* TLS PIC Stub. */
3873 ? arm_stub_long_branch_any_tls_pic
3874 : (globals->nacl_p
3875 ? arm_stub_long_branch_arm_nacl_pic
3876 : arm_stub_long_branch_any_arm_pic))
3877 /* non-PIC stubs. */
3878 : (globals->nacl_p
3879 ? arm_stub_long_branch_arm_nacl
3880 : arm_stub_long_branch_any_any);
3881 }
3882 }
3883 }
3884
3885 /* If a stub is needed, record the actual destination type. */
3886 if (stub_type != arm_stub_none)
3887 *actual_branch_type = branch_type;
3888
3889 return stub_type;
3890 }
3891
3892 /* Build a name for an entry in the stub hash table. */
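/* For example (illustrative values only), a stub of type 5 for a call to
   "printf" from the section with id 0x12 and addend 0 would be named
   "00000012_printf+0_5"; stubs for local symbols encode the symbol
   section id and symbol index instead of a name.  */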
3893
3894 static char *
3895 elf32_arm_stub_name (const asection *input_section,
3896 const asection *sym_sec,
3897 const struct elf32_arm_link_hash_entry *hash,
3898 const Elf_Internal_Rela *rel,
3899 enum elf32_arm_stub_type stub_type)
3900 {
3901 char *stub_name;
3902 bfd_size_type len;
3903
3904 if (hash)
3905 {
3906 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3907 stub_name = (char *) bfd_malloc (len);
3908 if (stub_name != NULL)
3909 sprintf (stub_name, "%08x_%s+%x_%d",
3910 input_section->id & 0xffffffff,
3911 hash->root.root.root.string,
3912 (int) rel->r_addend & 0xffffffff,
3913 (int) stub_type);
3914 }
3915 else
3916 {
3917 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3918 stub_name = (char *) bfd_malloc (len);
3919 if (stub_name != NULL)
3920 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3921 input_section->id & 0xffffffff,
3922 sym_sec->id & 0xffffffff,
3923 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3924 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3925 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3926 (int) rel->r_addend & 0xffffffff,
3927 (int) stub_type);
3928 }
3929
3930 return stub_name;
3931 }
3932
3933 /* Look up an entry in the stub hash. Stub entries are cached because
3934 creating the stub name takes a bit of time. */
3935
3936 static struct elf32_arm_stub_hash_entry *
3937 elf32_arm_get_stub_entry (const asection *input_section,
3938 const asection *sym_sec,
3939 struct elf_link_hash_entry *hash,
3940 const Elf_Internal_Rela *rel,
3941 struct elf32_arm_link_hash_table *htab,
3942 enum elf32_arm_stub_type stub_type)
3943 {
3944 struct elf32_arm_stub_hash_entry *stub_entry;
3945 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3946 const asection *id_sec;
3947
3948 if ((input_section->flags & SEC_CODE) == 0)
3949 return NULL;
3950
3951 /* If this input section is part of a group of sections sharing one
3952 stub section, then use the id of the first section in the group.
3953 Stub names need to include a section id, as there may well be
 3954      more than one stub used to reach, say, printf, and we need to
3955 distinguish between them. */
3956 id_sec = htab->stub_group[input_section->id].link_sec;
3957
3958 if (h != NULL && h->stub_cache != NULL
3959 && h->stub_cache->h == h
3960 && h->stub_cache->id_sec == id_sec
3961 && h->stub_cache->stub_type == stub_type)
3962 {
3963 stub_entry = h->stub_cache;
3964 }
3965 else
3966 {
3967 char *stub_name;
3968
3969 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3970 if (stub_name == NULL)
3971 return NULL;
3972
3973 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3974 stub_name, FALSE, FALSE);
3975 if (h != NULL)
3976 h->stub_cache = stub_entry;
3977
3978 free (stub_name);
3979 }
3980
3981 return stub_entry;
3982 }
3983
3984 /* Find or create a stub section. Returns a pointer to the stub section, and
3985 the section to which the stub section will be attached (in *LINK_SEC_P).
3986 LINK_SEC_P may be NULL. */
3987
3988 static asection *
3989 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3990 struct elf32_arm_link_hash_table *htab)
3991 {
3992 asection *link_sec;
3993 asection *stub_sec;
3994
3995 link_sec = htab->stub_group[section->id].link_sec;
3996 BFD_ASSERT (link_sec != NULL);
3997 stub_sec = htab->stub_group[section->id].stub_sec;
3998
3999 if (stub_sec == NULL)
4000 {
4001 stub_sec = htab->stub_group[link_sec->id].stub_sec;
4002 if (stub_sec == NULL)
4003 {
4004 size_t namelen;
4005 bfd_size_type len;
4006 char *s_name;
4007
4008 namelen = strlen (link_sec->name);
4009 len = namelen + sizeof (STUB_SUFFIX);
4010 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4011 if (s_name == NULL)
4012 return NULL;
4013
4014 memcpy (s_name, link_sec->name, namelen);
4015 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4016 stub_sec = (*htab->add_stub_section) (s_name, link_sec,
4017 htab->nacl_p ? 4 : 3);
4018 if (stub_sec == NULL)
4019 return NULL;
4020 htab->stub_group[link_sec->id].stub_sec = stub_sec;
4021 }
4022 htab->stub_group[section->id].stub_sec = stub_sec;
4023 }
4024
4025 if (link_sec_p)
4026 *link_sec_p = link_sec;
4027
4028 return stub_sec;
4029 }
4030
4031 /* Add a new stub entry to the stub hash. Not all fields of the new
4032 stub entry are initialised. */
4033
4034 static struct elf32_arm_stub_hash_entry *
4035 elf32_arm_add_stub (const char *stub_name,
4036 asection *section,
4037 struct elf32_arm_link_hash_table *htab)
4038 {
4039 asection *link_sec;
4040 asection *stub_sec;
4041 struct elf32_arm_stub_hash_entry *stub_entry;
4042
4043 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
4044 if (stub_sec == NULL)
4045 return NULL;
4046
4047 /* Enter this entry into the linker stub hash table. */
4048 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4049 TRUE, FALSE);
4050 if (stub_entry == NULL)
4051 {
 4052       (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
4053 section->owner,
4054 stub_name);
4055 return NULL;
4056 }
4057
4058 stub_entry->stub_sec = stub_sec;
4059 stub_entry->stub_offset = 0;
4060 stub_entry->id_sec = link_sec;
4061
4062 return stub_entry;
4063 }
4064
4065 /* Store an Arm insn into an output section not processed by
4066 elf32_arm_write_section. */
4067
4068 static void
4069 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4070 bfd * output_bfd, bfd_vma val, void * ptr)
4071 {
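  /* If byteswap_code is set, the instruction is stored in the opposite
     byte order to the output data; otherwise it follows the output BFD's
     byte order.  */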
4072 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4073 bfd_putl32 (val, ptr);
4074 else
4075 bfd_putb32 (val, ptr);
4076 }
4077
4078 /* Store a 16-bit Thumb insn into an output section not processed by
4079 elf32_arm_write_section. */
4080
4081 static void
4082 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4083 bfd * output_bfd, bfd_vma val, void * ptr)
4084 {
4085 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4086 bfd_putl16 (val, ptr);
4087 else
4088 bfd_putb16 (val, ptr);
4089 }
4090
4091 /* If it's possible to change R_TYPE to a more efficient access
4092 model, return the new reloc type. */
4093
4094 static unsigned
4095 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4096 struct elf_link_hash_entry *h)
4097 {
4098 int is_local = (h == NULL);
4099
4100 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4101 return r_type;
4102
4103 /* We do not support relaxations for Old TLS models. */
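  /* In an executable, the TLS descriptor relocations below are relaxed to
     a LE (immediate TP-offset) access for local symbols and to an IE
     (GOT-indirect) access for global ones.  */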
4104 switch (r_type)
4105 {
4106 case R_ARM_TLS_GOTDESC:
4107 case R_ARM_TLS_CALL:
4108 case R_ARM_THM_TLS_CALL:
4109 case R_ARM_TLS_DESCSEQ:
4110 case R_ARM_THM_TLS_DESCSEQ:
4111 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4112 }
4113
4114 return r_type;
4115 }
4116
4117 static bfd_reloc_status_type elf32_arm_final_link_relocate
4118 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4119 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4120 const char *, unsigned char, enum arm_st_branch_type,
4121 struct elf_link_hash_entry *, bfd_boolean *, char **);
4122
4123 static unsigned int
4124 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4125 {
4126 switch (stub_type)
4127 {
4128 case arm_stub_a8_veneer_b_cond:
4129 case arm_stub_a8_veneer_b:
4130 case arm_stub_a8_veneer_bl:
4131 return 2;
4132
4133 case arm_stub_long_branch_any_any:
4134 case arm_stub_long_branch_v4t_arm_thumb:
4135 case arm_stub_long_branch_thumb_only:
4136 case arm_stub_long_branch_v4t_thumb_thumb:
4137 case arm_stub_long_branch_v4t_thumb_arm:
4138 case arm_stub_short_branch_v4t_thumb_arm:
4139 case arm_stub_long_branch_any_arm_pic:
4140 case arm_stub_long_branch_any_thumb_pic:
4141 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4142 case arm_stub_long_branch_v4t_arm_thumb_pic:
4143 case arm_stub_long_branch_v4t_thumb_arm_pic:
4144 case arm_stub_long_branch_thumb_only_pic:
4145 case arm_stub_long_branch_any_tls_pic:
4146 case arm_stub_long_branch_v4t_thumb_tls_pic:
4147 case arm_stub_a8_veneer_blx:
4148 return 4;
4149
4150 case arm_stub_long_branch_arm_nacl:
4151 case arm_stub_long_branch_arm_nacl_pic:
4152 return 16;
4153
4154 default:
4155 abort (); /* Should be unreachable. */
4156 }
4157 }
4158
4159 static bfd_boolean
4160 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4161 void * in_arg)
4162 {
4163 #define MAXRELOCS 3
4164 struct elf32_arm_stub_hash_entry *stub_entry;
4165 struct elf32_arm_link_hash_table *globals;
4166 struct bfd_link_info *info;
4167 asection *stub_sec;
4168 bfd *stub_bfd;
4169 bfd_byte *loc;
4170 bfd_vma sym_value;
4171 int template_size;
4172 int size;
4173 const insn_sequence *template_sequence;
4174 int i;
4175 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4176 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4177 int nrelocs = 0;
4178
4179 /* Massage our args to the form they really have. */
4180 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4181 info = (struct bfd_link_info *) in_arg;
4182
4183 globals = elf32_arm_hash_table (info);
4184 if (globals == NULL)
4185 return FALSE;
4186
4187 stub_sec = stub_entry->stub_sec;
4188
4189 if ((globals->fix_cortex_a8 < 0)
4190 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4191 /* We have to do less-strictly-aligned fixes last. */
4192 return TRUE;
4193
4194 /* Make a note of the offset within the stubs for this entry. */
4195 stub_entry->stub_offset = stub_sec->size;
4196 loc = stub_sec->contents + stub_entry->stub_offset;
4197
4198 stub_bfd = stub_sec->owner;
4199
4200 /* This is the address of the stub destination. */
4201 sym_value = (stub_entry->target_value
4202 + stub_entry->target_section->output_offset
4203 + stub_entry->target_section->output_section->vma);
4204
4205 template_sequence = stub_entry->stub_template;
4206 template_size = stub_entry->stub_template_size;
4207
4208 size = 0;
4209 for (i = 0; i < template_size; i++)
4210 {
4211 switch (template_sequence[i].type)
4212 {
4213 case THUMB16_TYPE:
4214 {
4215 bfd_vma data = (bfd_vma) template_sequence[i].data;
4216 if (template_sequence[i].reloc_addend != 0)
4217 {
4218 /* We've borrowed the reloc_addend field to mean we should
4219 insert a condition code into this (Thumb-1 branch)
4220 instruction. See THUMB16_BCOND_INSN. */
4221 BFD_ASSERT ((data & 0xff00) == 0xd000);
4222 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4223 }
4224 bfd_put_16 (stub_bfd, data, loc + size);
4225 size += 2;
4226 }
4227 break;
4228
4229 case THUMB32_TYPE:
4230 bfd_put_16 (stub_bfd,
4231 (template_sequence[i].data >> 16) & 0xffff,
4232 loc + size);
4233 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4234 loc + size + 2);
4235 if (template_sequence[i].r_type != R_ARM_NONE)
4236 {
4237 stub_reloc_idx[nrelocs] = i;
4238 stub_reloc_offset[nrelocs++] = size;
4239 }
4240 size += 4;
4241 break;
4242
4243 case ARM_TYPE:
4244 bfd_put_32 (stub_bfd, template_sequence[i].data,
4245 loc + size);
4246 /* Handle cases where the target is encoded within the
4247 instruction. */
4248 if (template_sequence[i].r_type == R_ARM_JUMP24)
4249 {
4250 stub_reloc_idx[nrelocs] = i;
4251 stub_reloc_offset[nrelocs++] = size;
4252 }
4253 size += 4;
4254 break;
4255
4256 case DATA_TYPE:
4257 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4258 stub_reloc_idx[nrelocs] = i;
4259 stub_reloc_offset[nrelocs++] = size;
4260 size += 4;
4261 break;
4262
4263 default:
4264 BFD_FAIL ();
4265 return FALSE;
4266 }
4267 }
4268
4269 stub_sec->size += size;
4270
4271 /* Stub size has already been computed in arm_size_one_stub. Check
4272 consistency. */
4273 BFD_ASSERT (size == stub_entry->stub_size);
4274
4275 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4276 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4277 sym_value |= 1;
4278
4279 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4280 in each stub. */
4281 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4282
4283 for (i = 0; i < nrelocs; i++)
4284 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4285 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4286 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4287 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4288 {
4289 Elf_Internal_Rela rel;
4290 bfd_boolean unresolved_reloc;
4291 char *error_message;
4292 enum arm_st_branch_type branch_type
4293 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4294 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4295 bfd_vma points_to = sym_value + stub_entry->target_addend;
4296
4297 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4298 rel.r_info = ELF32_R_INFO (0,
4299 template_sequence[stub_reloc_idx[i]].r_type);
4300 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4301
4302 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4303 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4304 template should refer back to the instruction after the original
4305 branch. */
4306 points_to = sym_value;
4307
4308 /* There may be unintended consequences if this is not true. */
4309 BFD_ASSERT (stub_entry->h == NULL);
4310
4311 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4312 properly. We should probably use this function unconditionally,
4313 rather than only for certain relocations listed in the enclosing
4314 conditional, for the sake of consistency. */
4315 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4316 (template_sequence[stub_reloc_idx[i]].r_type),
4317 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4318 points_to, info, stub_entry->target_section, "", STT_FUNC,
4319 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4320 &unresolved_reloc, &error_message);
4321 }
4322 else
4323 {
4324 Elf_Internal_Rela rel;
4325 bfd_boolean unresolved_reloc;
4326 char *error_message;
4327 bfd_vma points_to = sym_value + stub_entry->target_addend
4328 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4329
4330 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4331 rel.r_info = ELF32_R_INFO (0,
4332 template_sequence[stub_reloc_idx[i]].r_type);
4333 rel.r_addend = 0;
4334
4335 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4336 (template_sequence[stub_reloc_idx[i]].r_type),
4337 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4338 points_to, info, stub_entry->target_section, "", STT_FUNC,
4339 stub_entry->branch_type,
4340 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4341 &error_message);
4342 }
4343
4344 return TRUE;
4345 #undef MAXRELOCS
4346 }
4347
4348 /* Calculate the template, template size and instruction size for a stub.
4349 Return value is the instruction size. */
4350
4351 static unsigned int
4352 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4353 const insn_sequence **stub_template,
4354 int *stub_template_size)
4355 {
4356 const insn_sequence *template_sequence = NULL;
4357 int template_size = 0, i;
4358 unsigned int size;
4359
4360 template_sequence = stub_definitions[stub_type].template_sequence;
4361 if (stub_template)
4362 *stub_template = template_sequence;
4363
4364 template_size = stub_definitions[stub_type].template_size;
4365 if (stub_template_size)
4366 *stub_template_size = template_size;
4367
4368 size = 0;
4369 for (i = 0; i < template_size; i++)
4370 {
4371 switch (template_sequence[i].type)
4372 {
4373 case THUMB16_TYPE:
4374 size += 2;
4375 break;
4376
4377 case ARM_TYPE:
4378 case THUMB32_TYPE:
4379 case DATA_TYPE:
4380 size += 4;
4381 break;
4382
4383 default:
4384 BFD_FAIL ();
4385 return 0;
4386 }
4387 }
4388
4389 return size;
4390 }
4391
4392 /* As above, but don't actually build the stub. Just bump offset so
4393 we know stub section sizes. */
4394
4395 static bfd_boolean
4396 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4397 void *in_arg ATTRIBUTE_UNUSED)
4398 {
4399 struct elf32_arm_stub_hash_entry *stub_entry;
4400 const insn_sequence *template_sequence;
4401 int template_size, size;
4402
4403 /* Massage our args to the form they really have. */
4404 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4405
 4406   BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
 4407               && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
4408
4409 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4410 &template_size);
4411
4412 stub_entry->stub_size = size;
4413 stub_entry->stub_template = template_sequence;
4414 stub_entry->stub_template_size = template_size;
4415
4416 size = (size + 7) & ~7;
4417 stub_entry->stub_sec->size += size;
4418
4419 return TRUE;
4420 }
4421
4422 /* External entry points for sizing and building linker stubs. */
4423
4424 /* Set up various things so that we can make a list of input sections
4425 for each output section included in the link. Returns -1 on error,
4426 0 when no stubs will be needed, and 1 on success. */
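/* Two tables are created below: stub_group, indexed by input section id,
   and input_list, indexed by output section index.  Entries of input_list
   for non-code output sections keep the bfd_abs_section_ptr marker so that
   they can be recognised and skipped later.  */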
4427
4428 int
4429 elf32_arm_setup_section_lists (bfd *output_bfd,
4430 struct bfd_link_info *info)
4431 {
4432 bfd *input_bfd;
4433 unsigned int bfd_count;
4434 int top_id, top_index;
4435 asection *section;
4436 asection **input_list, **list;
4437 bfd_size_type amt;
4438 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4439
4440 if (htab == NULL)
4441 return 0;
4442 if (! is_elf_hash_table (htab))
4443 return 0;
4444
4445 /* Count the number of input BFDs and find the top input section id. */
4446 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4447 input_bfd != NULL;
4448 input_bfd = input_bfd->link.next)
4449 {
4450 bfd_count += 1;
4451 for (section = input_bfd->sections;
4452 section != NULL;
4453 section = section->next)
4454 {
4455 if (top_id < section->id)
4456 top_id = section->id;
4457 }
4458 }
4459 htab->bfd_count = bfd_count;
4460
4461 amt = sizeof (struct map_stub) * (top_id + 1);
4462 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4463 if (htab->stub_group == NULL)
4464 return -1;
4465 htab->top_id = top_id;
4466
4467 /* We can't use output_bfd->section_count here to find the top output
4468 section index as some sections may have been removed, and
4469 _bfd_strip_section_from_output doesn't renumber the indices. */
4470 for (section = output_bfd->sections, top_index = 0;
4471 section != NULL;
4472 section = section->next)
4473 {
4474 if (top_index < section->index)
4475 top_index = section->index;
4476 }
4477
4478 htab->top_index = top_index;
4479 amt = sizeof (asection *) * (top_index + 1);
4480 input_list = (asection **) bfd_malloc (amt);
4481 htab->input_list = input_list;
4482 if (input_list == NULL)
4483 return -1;
4484
4485 /* For sections we aren't interested in, mark their entries with a
4486 value we can check later. */
4487 list = input_list + top_index;
4488 do
4489 *list = bfd_abs_section_ptr;
4490 while (list-- != input_list);
4491
4492 for (section = output_bfd->sections;
4493 section != NULL;
4494 section = section->next)
4495 {
4496 if ((section->flags & SEC_CODE) != 0)
4497 input_list[section->index] = NULL;
4498 }
4499
4500 return 1;
4501 }
4502
4503 /* The linker repeatedly calls this function for each input section,
4504 in the order that input sections are linked into output sections.
4505 Build lists of input sections to determine groupings between which
4506 we may insert linker stubs. */
4507
4508 void
4509 elf32_arm_next_input_section (struct bfd_link_info *info,
4510 asection *isec)
4511 {
4512 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4513
4514 if (htab == NULL)
4515 return;
4516
4517 if (isec->output_section->index <= htab->top_index)
4518 {
4519 asection **list = htab->input_list + isec->output_section->index;
4520
4521 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4522 {
4523 /* Steal the link_sec pointer for our list. */
4524 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4525 /* This happens to make the list in reverse order,
4526 which we reverse later. */
4527 PREV_SEC (isec) = *list;
4528 *list = isec;
4529 }
4530 }
4531 }
4532
4533 /* See whether we can group stub sections together. Grouping stub
4534 sections may result in fewer stubs. More importantly, we need to
4535 put all .init* and .fini* stubs at the end of the .init or
4536 .fini output sections respectively, because glibc splits the
4537 _init and _fini functions into multiple parts. Putting a stub in
4538 the middle of a function is not a good idea. */
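/* In outline (a summary of the loop below): sections are visited in link
   order; a group starts at the first unassigned section and grows while
   the distance from the group's start to the end of the next section stays
   below STUB_GROUP_SIZE.  Every section in the group gets the last section
   as its link_sec, so their stubs are emitted just after it; if stubs may
   also precede branches, sections within a further STUB_GROUP_SIZE bytes
   after that point join the same group.  */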
4539
4540 static void
4541 group_sections (struct elf32_arm_link_hash_table *htab,
4542 bfd_size_type stub_group_size,
4543 bfd_boolean stubs_always_after_branch)
4544 {
4545 asection **list = htab->input_list;
4546
4547 do
4548 {
4549 asection *tail = *list;
4550 asection *head;
4551
4552 if (tail == bfd_abs_section_ptr)
4553 continue;
4554
4555 /* Reverse the list: we must avoid placing stubs at the
4556 beginning of the section because the beginning of the text
4557 section may be required for an interrupt vector in bare metal
4558 code. */
4559 #define NEXT_SEC PREV_SEC
4560 head = NULL;
4561 while (tail != NULL)
4562 {
4563 /* Pop from tail. */
4564 asection *item = tail;
4565 tail = PREV_SEC (item);
4566
4567 /* Push on head. */
4568 NEXT_SEC (item) = head;
4569 head = item;
4570 }
4571
4572 while (head != NULL)
4573 {
4574 asection *curr;
4575 asection *next;
4576 bfd_vma stub_group_start = head->output_offset;
4577 bfd_vma end_of_next;
4578
4579 curr = head;
4580 while (NEXT_SEC (curr) != NULL)
4581 {
4582 next = NEXT_SEC (curr);
4583 end_of_next = next->output_offset + next->size;
4584 if (end_of_next - stub_group_start >= stub_group_size)
4585 /* End of NEXT is too far from start, so stop. */
4586 break;
4587 /* Add NEXT to the group. */
4588 curr = next;
4589 }
4590
4591 /* OK, the size from the start to the start of CURR is less
4592 than stub_group_size and thus can be handled by one stub
4593 section. (Or the head section is itself larger than
4594 stub_group_size, in which case we may be toast.)
4595 We should really be keeping track of the total size of
4596 stubs added here, as stubs contribute to the final output
4597 section size. */
4598 do
4599 {
4600 next = NEXT_SEC (head);
4601 /* Set up this stub group. */
4602 htab->stub_group[head->id].link_sec = curr;
4603 }
4604 while (head != curr && (head = next) != NULL);
4605
4606 /* But wait, there's more! Input sections up to stub_group_size
4607 bytes after the stub section can be handled by it too. */
4608 if (!stubs_always_after_branch)
4609 {
4610 stub_group_start = curr->output_offset + curr->size;
4611
4612 while (next != NULL)
4613 {
4614 end_of_next = next->output_offset + next->size;
4615 if (end_of_next - stub_group_start >= stub_group_size)
4616 /* End of NEXT is too far from stubs, so stop. */
4617 break;
4618 /* Add NEXT to the stub group. */
4619 head = next;
4620 next = NEXT_SEC (head);
4621 htab->stub_group[head->id].link_sec = curr;
4622 }
4623 }
4624 head = next;
4625 }
4626 }
4627 while (list++ != htab->input_list + htab->top_index);
4628
4629 free (htab->input_list);
4630 #undef PREV_SEC
4631 #undef NEXT_SEC
4632 }
4633
4634 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4635 erratum fix. */
4636
4637 static int
4638 a8_reloc_compare (const void *a, const void *b)
4639 {
4640 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4641 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4642
4643 if (ra->from < rb->from)
4644 return -1;
4645 else if (ra->from > rb->from)
4646 return 1;
4647 else
4648 return 0;
4649 }
4650
4651 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4652 const char *, char **);
4653
4654 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4655 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4656 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4657 otherwise. */
4658
4659 static bfd_boolean
4660 cortex_a8_erratum_scan (bfd *input_bfd,
4661 struct bfd_link_info *info,
4662 struct a8_erratum_fix **a8_fixes_p,
4663 unsigned int *num_a8_fixes_p,
4664 unsigned int *a8_fix_table_size_p,
4665 struct a8_erratum_reloc *a8_relocs,
4666 unsigned int num_a8_relocs,
4667 unsigned prev_num_a8_fixes,
4668 bfd_boolean *stub_changed_p)
4669 {
4670 asection *section;
4671 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4672 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4673 unsigned int num_a8_fixes = *num_a8_fixes_p;
4674 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4675
4676 if (htab == NULL)
4677 return FALSE;
4678
4679 for (section = input_bfd->sections;
4680 section != NULL;
4681 section = section->next)
4682 {
4683 bfd_byte *contents = NULL;
4684 struct _arm_elf_section_data *sec_data;
4685 unsigned int span;
4686 bfd_vma base_vma;
4687
4688 if (elf_section_type (section) != SHT_PROGBITS
4689 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4690 || (section->flags & SEC_EXCLUDE) != 0
4691 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4692 || (section->output_section == bfd_abs_section_ptr))
4693 continue;
4694
4695 base_vma = section->output_section->vma + section->output_offset;
4696
4697 if (elf_section_data (section)->this_hdr.contents != NULL)
4698 contents = elf_section_data (section)->this_hdr.contents;
4699 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4700 return TRUE;
4701
4702 sec_data = elf32_arm_section_data (section);
4703
4704 for (span = 0; span < sec_data->mapcount; span++)
4705 {
4706 unsigned int span_start = sec_data->map[span].vma;
4707 unsigned int span_end = (span == sec_data->mapcount - 1)
4708 ? section->size : sec_data->map[span + 1].vma;
4709 unsigned int i;
4710 char span_type = sec_data->map[span].type;
4711 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4712
4713 if (span_type != 't')
4714 continue;
4715
4716 /* Span is entirely within a single 4KB region: skip scanning. */
4717 if (((base_vma + span_start) & ~0xfff)
4718 == ((base_vma + span_end) & ~0xfff))
4719 continue;
4720
4721 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4722
4723 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4724 * The branch target is in the same 4KB region as the
4725 first half of the branch.
4726 * The instruction before the branch is a 32-bit
4727 length non-branch instruction. */
4728 for (i = span_start; i < span_end;)
4729 {
4730 unsigned int insn = bfd_getl16 (&contents[i]);
4731 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4732 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4733
4734 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4735 insn_32bit = TRUE;
4736
4737 if (insn_32bit)
4738 {
4739 /* Load the rest of the insn (in manual-friendly order). */
4740 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4741
4742 /* Encoding T4: B<c>.W. */
4743 is_b = (insn & 0xf800d000) == 0xf0009000;
4744 /* Encoding T1: BL<c>.W. */
4745 is_bl = (insn & 0xf800d000) == 0xf000d000;
4746 /* Encoding T2: BLX<c>.W. */
4747 is_blx = (insn & 0xf800d000) == 0xf000c000;
4748 /* Encoding T3: B<c>.W (not permitted in IT block). */
4749 is_bcc = (insn & 0xf800d000) == 0xf0008000
4750 && (insn & 0x07f00000) != 0x03800000;
4751 }
4752
4753 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4754
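              /* The problematic sequence starts with a 32-bit branch whose
                 first halfword occupies the last two bytes of a 4KB region
                 (address ending in 0xffe), preceded by a 32-bit non-branch
                 instruction.  */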
4755 if (((base_vma + i) & 0xfff) == 0xffe
4756 && insn_32bit
4757 && is_32bit_branch
4758 && last_was_32bit
4759 && ! last_was_branch)
4760 {
4761 bfd_signed_vma offset = 0;
4762 bfd_boolean force_target_arm = FALSE;
4763 bfd_boolean force_target_thumb = FALSE;
4764 bfd_vma target;
4765 enum elf32_arm_stub_type stub_type = arm_stub_none;
4766 struct a8_erratum_reloc key, *found;
4767 bfd_boolean use_plt = FALSE;
4768
4769 key.from = base_vma + i;
4770 found = (struct a8_erratum_reloc *)
4771 bsearch (&key, a8_relocs, num_a8_relocs,
4772 sizeof (struct a8_erratum_reloc),
4773 &a8_reloc_compare);
4774
4775 if (found)
4776 {
4777 char *error_message = NULL;
4778 struct elf_link_hash_entry *entry;
4779
4780 /* We don't care about the error returned from this
 4781                      function, only whether there is glue or not.  */
4782 entry = find_thumb_glue (info, found->sym_name,
4783 &error_message);
4784
4785 if (entry)
4786 found->non_a8_stub = TRUE;
4787
4788 /* Keep a simpler condition, for the sake of clarity. */
4789 if (htab->root.splt != NULL && found->hash != NULL
4790 && found->hash->root.plt.offset != (bfd_vma) -1)
4791 use_plt = TRUE;
4792
4793 if (found->r_type == R_ARM_THM_CALL)
4794 {
4795 if (found->branch_type == ST_BRANCH_TO_ARM
4796 || use_plt)
4797 force_target_arm = TRUE;
4798 else
4799 force_target_thumb = TRUE;
4800 }
4801 }
4802
4803 /* Check if we have an offending branch instruction. */
4804
4805 if (found && found->non_a8_stub)
4806 /* We've already made a stub for this instruction, e.g.
4807 it's a long branch or a Thumb->ARM stub. Assume that
4808 stub will suffice to work around the A8 erratum (see
4809 setting of always_after_branch above). */
4810 ;
4811 else if (is_bcc)
4812 {
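                  /* Encoding T3 (conditional B.W):
                     offset = SignExtend (S:J2:J1:imm6:imm11:0).  */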
4813 offset = (insn & 0x7ff) << 1;
4814 offset |= (insn & 0x3f0000) >> 4;
4815 offset |= (insn & 0x2000) ? 0x40000 : 0;
4816 offset |= (insn & 0x800) ? 0x80000 : 0;
4817 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4818 if (offset & 0x100000)
4819 offset |= ~ ((bfd_signed_vma) 0xfffff);
4820 stub_type = arm_stub_a8_veneer_b_cond;
4821 }
4822 else if (is_b || is_bl || is_blx)
4823 {
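                  /* Encodings T1 (BL), T2 (BLX) and T4 (B.W):
                     offset = SignExtend (S:I1:I2:imm10:imm11:0) with
                     I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S).  */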
4824 int s = (insn & 0x4000000) != 0;
4825 int j1 = (insn & 0x2000) != 0;
4826 int j2 = (insn & 0x800) != 0;
4827 int i1 = !(j1 ^ s);
4828 int i2 = !(j2 ^ s);
4829
4830 offset = (insn & 0x7ff) << 1;
4831 offset |= (insn & 0x3ff0000) >> 4;
4832 offset |= i2 << 22;
4833 offset |= i1 << 23;
4834 offset |= s << 24;
4835 if (offset & 0x1000000)
4836 offset |= ~ ((bfd_signed_vma) 0xffffff);
4837
4838 if (is_blx)
4839 offset &= ~ ((bfd_signed_vma) 3);
4840
4841 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4842 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4843 }
4844
4845 if (stub_type != arm_stub_none)
4846 {
4847 bfd_vma pc_for_insn = base_vma + i + 4;
4848
4849 /* The original instruction is a BL, but the target is
4850 an ARM instruction. If we were not making a stub,
4851 the BL would have been converted to a BLX. Use the
4852 BLX stub instead in that case. */
4853 if (htab->use_blx && force_target_arm
4854 && stub_type == arm_stub_a8_veneer_bl)
4855 {
4856 stub_type = arm_stub_a8_veneer_blx;
4857 is_blx = TRUE;
4858 is_bl = FALSE;
4859 }
4860 /* Conversely, if the original instruction was
4861 BLX but the target is Thumb mode, use the BL
4862 stub. */
4863 else if (force_target_thumb
4864 && stub_type == arm_stub_a8_veneer_blx)
4865 {
4866 stub_type = arm_stub_a8_veneer_bl;
4867 is_blx = FALSE;
4868 is_bl = TRUE;
4869 }
4870
4871 if (is_blx)
4872 pc_for_insn &= ~ ((bfd_vma) 3);
4873
4874 /* If we found a relocation, use the proper destination,
4875 not the offset in the (unrelocated) instruction.
4876 Note this is always done if we switched the stub type
4877 above. */
4878 if (found)
4879 offset =
4880 (bfd_signed_vma) (found->destination - pc_for_insn);
4881
4882 /* If the stub will use a Thumb-mode branch to a
4883 PLT target, redirect it to the preceding Thumb
4884 entry point. */
4885 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4886 offset -= PLT_THUMB_STUB_SIZE;
4887
4888 target = pc_for_insn + offset;
4889
4890 /* The BLX stub is ARM-mode code. Adjust the offset to
4891 take the different PC value (+8 instead of +4) into
4892 account. */
4893 if (stub_type == arm_stub_a8_veneer_blx)
4894 offset += 4;
4895
4896 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4897 {
4898 char *stub_name = NULL;
4899
4900 if (num_a8_fixes == a8_fix_table_size)
4901 {
4902 a8_fix_table_size *= 2;
4903 a8_fixes = (struct a8_erratum_fix *)
4904 bfd_realloc (a8_fixes,
4905 sizeof (struct a8_erratum_fix)
4906 * a8_fix_table_size);
4907 }
4908
4909 if (num_a8_fixes < prev_num_a8_fixes)
4910 {
4911 /* If we're doing a subsequent scan,
4912 check if we've found the same fix as
 4913                          before, and try to reuse the stub
4914 name. */
4915 stub_name = a8_fixes[num_a8_fixes].stub_name;
4916 if ((a8_fixes[num_a8_fixes].section != section)
4917 || (a8_fixes[num_a8_fixes].offset != i))
4918 {
4919 free (stub_name);
4920 stub_name = NULL;
4921 *stub_changed_p = TRUE;
4922 }
4923 }
4924
4925 if (!stub_name)
4926 {
4927 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4928 if (stub_name != NULL)
4929 sprintf (stub_name, "%x:%x", section->id, i);
4930 }
4931
4932 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4933 a8_fixes[num_a8_fixes].section = section;
4934 a8_fixes[num_a8_fixes].offset = i;
4935 a8_fixes[num_a8_fixes].addend = offset;
4936 a8_fixes[num_a8_fixes].orig_insn = insn;
4937 a8_fixes[num_a8_fixes].stub_name = stub_name;
4938 a8_fixes[num_a8_fixes].stub_type = stub_type;
4939 a8_fixes[num_a8_fixes].branch_type =
4940 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4941
4942 num_a8_fixes++;
4943 }
4944 }
4945 }
4946
4947 i += insn_32bit ? 4 : 2;
4948 last_was_32bit = insn_32bit;
4949 last_was_branch = is_32bit_branch;
4950 }
4951 }
4952
4953 if (elf_section_data (section)->this_hdr.contents == NULL)
4954 free (contents);
4955 }
4956
4957 *a8_fixes_p = a8_fixes;
4958 *num_a8_fixes_p = num_a8_fixes;
4959 *a8_fix_table_size_p = a8_fix_table_size;
4960
4961 return FALSE;
4962 }
4963
4964 /* Determine and set the size of the stub section for a final link.
4965
4966 The basic idea here is to examine all the relocations looking for
4967 PC-relative calls to a target that is unreachable with a "bl"
4968 instruction. */
4969
4970 bfd_boolean
4971 elf32_arm_size_stubs (bfd *output_bfd,
4972 bfd *stub_bfd,
4973 struct bfd_link_info *info,
4974 bfd_signed_vma group_size,
4975 asection * (*add_stub_section) (const char *, asection *,
4976 unsigned int),
4977 void (*layout_sections_again) (void))
4978 {
4979 bfd_size_type stub_group_size;
4980 bfd_boolean stubs_always_after_branch;
4981 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4982 struct a8_erratum_fix *a8_fixes = NULL;
4983 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4984 struct a8_erratum_reloc *a8_relocs = NULL;
4985 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4986
4987 if (htab == NULL)
4988 return FALSE;
4989
4990 if (htab->fix_cortex_a8)
4991 {
4992 a8_fixes = (struct a8_erratum_fix *)
4993 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4994 a8_relocs = (struct a8_erratum_reloc *)
4995 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4996 }
4997
4998 /* Propagate mach to stub bfd, because it may not have been
4999 finalized when we created stub_bfd. */
5000 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5001 bfd_get_mach (output_bfd));
5002
5003 /* Stash our params away. */
5004 htab->stub_bfd = stub_bfd;
5005 htab->add_stub_section = add_stub_section;
5006 htab->layout_sections_again = layout_sections_again;
5007 stubs_always_after_branch = group_size < 0;
5008
5009 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5010 as the first half of a 32-bit branch straddling two 4K pages. This is a
5011 crude way of enforcing that. */
5012 if (htab->fix_cortex_a8)
5013 stubs_always_after_branch = 1;
5014
5015 if (group_size < 0)
5016 stub_group_size = -group_size;
5017 else
5018 stub_group_size = group_size;
5019
5020 if (stub_group_size == 1)
5021 {
5022 /* Default values. */
 5023       /* The Thumb branch range of +-4MB has to be used as the default
 5024          maximum size (a given section can contain both ARM and Thumb
 5025          code, so the worst case has to be taken into account).
5026
5027 This value is 24K less than that, which allows for 2025
5028 12-byte stubs. If we exceed that, then we will fail to link.
5029 The user will have to relink with an explicit group size
5030 option. */
5031 stub_group_size = 4170000;
5032 }
5033
5034 group_sections (htab, stub_group_size, stubs_always_after_branch);
5035
5036 /* If we're applying the cortex A8 fix, we need to determine the
5037 program header size now, because we cannot change it later --
5038 that could alter section placements. Notice the A8 erratum fix
5039 ends up requiring the section addresses to remain unchanged
5040 modulo the page size. That's something we cannot represent
5041 inside BFD, and we don't want to force the section alignment to
5042 be the page size. */
5043 if (htab->fix_cortex_a8)
5044 (*htab->layout_sections_again) ();
5045
5046 while (1)
5047 {
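      /* One sizing pass: scan every relocation in every input section and
         create whatever stubs are still missing.  The enclosing loop is
         repeated until a pass adds no new stubs or Cortex-A8 fixes.  */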
5048 bfd *input_bfd;
5049 unsigned int bfd_indx;
5050 asection *stub_sec;
5051 bfd_boolean stub_changed = FALSE;
5052 unsigned prev_num_a8_fixes = num_a8_fixes;
5053
5054 num_a8_fixes = 0;
5055 for (input_bfd = info->input_bfds, bfd_indx = 0;
5056 input_bfd != NULL;
5057 input_bfd = input_bfd->link.next, bfd_indx++)
5058 {
5059 Elf_Internal_Shdr *symtab_hdr;
5060 asection *section;
5061 Elf_Internal_Sym *local_syms = NULL;
5062
5063 if (!is_arm_elf (input_bfd))
5064 continue;
5065
5066 num_a8_relocs = 0;
5067
5068 /* We'll need the symbol table in a second. */
5069 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5070 if (symtab_hdr->sh_info == 0)
5071 continue;
5072
5073 /* Walk over each section attached to the input bfd. */
5074 for (section = input_bfd->sections;
5075 section != NULL;
5076 section = section->next)
5077 {
5078 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5079
5080 /* If there aren't any relocs, then there's nothing more
5081 to do. */
5082 if ((section->flags & SEC_RELOC) == 0
5083 || section->reloc_count == 0
5084 || (section->flags & SEC_CODE) == 0)
5085 continue;
5086
5087 /* If this section is a link-once section that will be
5088 discarded, then don't create any stubs. */
5089 if (section->output_section == NULL
5090 || section->output_section->owner != output_bfd)
5091 continue;
5092
5093 /* Get the relocs. */
5094 internal_relocs
5095 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5096 NULL, info->keep_memory);
5097 if (internal_relocs == NULL)
5098 goto error_ret_free_local;
5099
5100 /* Now examine each relocation. */
5101 irela = internal_relocs;
5102 irelaend = irela + section->reloc_count;
5103 for (; irela < irelaend; irela++)
5104 {
5105 unsigned int r_type, r_indx;
5106 enum elf32_arm_stub_type stub_type;
5107 struct elf32_arm_stub_hash_entry *stub_entry;
5108 asection *sym_sec;
5109 bfd_vma sym_value;
5110 bfd_vma destination;
5111 struct elf32_arm_link_hash_entry *hash;
5112 const char *sym_name;
5113 char *stub_name;
5114 const asection *id_sec;
5115 unsigned char st_type;
5116 enum arm_st_branch_type branch_type;
5117 bfd_boolean created_stub = FALSE;
5118
5119 r_type = ELF32_R_TYPE (irela->r_info);
5120 r_indx = ELF32_R_SYM (irela->r_info);
5121
5122 if (r_type >= (unsigned int) R_ARM_max)
5123 {
5124 bfd_set_error (bfd_error_bad_value);
5125 error_ret_free_internal:
5126 if (elf_section_data (section)->relocs == NULL)
5127 free (internal_relocs);
5128 goto error_ret_free_local;
5129 }
5130
5131 hash = NULL;
5132 if (r_indx >= symtab_hdr->sh_info)
5133 hash = elf32_arm_hash_entry
5134 (elf_sym_hashes (input_bfd)
5135 [r_indx - symtab_hdr->sh_info]);
5136
 5137              /* Only look for stubs on branch instructions, or on
 5138                 non-relaxed TLS call relocations.  */
5139 if ((r_type != (unsigned int) R_ARM_CALL)
5140 && (r_type != (unsigned int) R_ARM_THM_CALL)
5141 && (r_type != (unsigned int) R_ARM_JUMP24)
5142 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5143 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5144 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5145 && (r_type != (unsigned int) R_ARM_PLT32)
5146 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5147 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5148 && r_type == elf32_arm_tls_transition
5149 (info, r_type, &hash->root)
5150 && ((hash ? hash->tls_type
5151 : (elf32_arm_local_got_tls_type
5152 (input_bfd)[r_indx]))
5153 & GOT_TLS_GDESC) != 0))
5154 continue;
5155
5156 /* Now determine the call target, its name, value,
5157 section. */
5158 sym_sec = NULL;
5159 sym_value = 0;
5160 destination = 0;
5161 sym_name = NULL;
5162
5163 if (r_type == (unsigned int) R_ARM_TLS_CALL
5164 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5165 {
 5166                  /* A non-relaxed TLS call.  The target is the
 5167                     PLT-resident trampoline and has nothing to do
 5168                     with the symbol itself.  */
5169 BFD_ASSERT (htab->tls_trampoline > 0);
5170 sym_sec = htab->root.splt;
5171 sym_value = htab->tls_trampoline;
5172 hash = 0;
5173 st_type = STT_FUNC;
5174 branch_type = ST_BRANCH_TO_ARM;
5175 }
5176 else if (!hash)
5177 {
5178 /* It's a local symbol. */
5179 Elf_Internal_Sym *sym;
5180
5181 if (local_syms == NULL)
5182 {
5183 local_syms
5184 = (Elf_Internal_Sym *) symtab_hdr->contents;
5185 if (local_syms == NULL)
5186 local_syms
5187 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5188 symtab_hdr->sh_info, 0,
5189 NULL, NULL, NULL);
5190 if (local_syms == NULL)
5191 goto error_ret_free_internal;
5192 }
5193
5194 sym = local_syms + r_indx;
5195 if (sym->st_shndx == SHN_UNDEF)
5196 sym_sec = bfd_und_section_ptr;
5197 else if (sym->st_shndx == SHN_ABS)
5198 sym_sec = bfd_abs_section_ptr;
5199 else if (sym->st_shndx == SHN_COMMON)
5200 sym_sec = bfd_com_section_ptr;
5201 else
5202 sym_sec =
5203 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5204
5205 if (!sym_sec)
5206 /* This is an undefined symbol. It can never
5207 be resolved. */
5208 continue;
5209
5210 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5211 sym_value = sym->st_value;
5212 destination = (sym_value + irela->r_addend
5213 + sym_sec->output_offset
5214 + sym_sec->output_section->vma);
5215 st_type = ELF_ST_TYPE (sym->st_info);
5216 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5217 sym_name
5218 = bfd_elf_string_from_elf_section (input_bfd,
5219 symtab_hdr->sh_link,
5220 sym->st_name);
5221 }
5222 else
5223 {
5224 /* It's an external symbol. */
5225 while (hash->root.root.type == bfd_link_hash_indirect
5226 || hash->root.root.type == bfd_link_hash_warning)
5227 hash = ((struct elf32_arm_link_hash_entry *)
5228 hash->root.root.u.i.link);
5229
5230 if (hash->root.root.type == bfd_link_hash_defined
5231 || hash->root.root.type == bfd_link_hash_defweak)
5232 {
5233 sym_sec = hash->root.root.u.def.section;
5234 sym_value = hash->root.root.u.def.value;
5235
5236 struct elf32_arm_link_hash_table *globals =
5237 elf32_arm_hash_table (info);
5238
5239 /* For a destination in a shared library,
5240 use the PLT stub as target address to
5241 decide whether a branch stub is
5242 needed. */
5243 if (globals != NULL
5244 && globals->root.splt != NULL
5245 && hash != NULL
5246 && hash->root.plt.offset != (bfd_vma) -1)
5247 {
5248 sym_sec = globals->root.splt;
5249 sym_value = hash->root.plt.offset;
5250 if (sym_sec->output_section != NULL)
5251 destination = (sym_value
5252 + sym_sec->output_offset
5253 + sym_sec->output_section->vma);
5254 }
5255 else if (sym_sec->output_section != NULL)
5256 destination = (sym_value + irela->r_addend
5257 + sym_sec->output_offset
5258 + sym_sec->output_section->vma);
5259 }
5260 else if ((hash->root.root.type == bfd_link_hash_undefined)
5261 || (hash->root.root.type == bfd_link_hash_undefweak))
5262 {
5263 /* For a shared library, use the PLT stub as
5264 target address to decide whether a long
5265 branch stub is needed.
 5266 			 Undefined symbols in absolute code cannot be handled.  */
5267 struct elf32_arm_link_hash_table *globals =
5268 elf32_arm_hash_table (info);
5269
5270 if (globals != NULL
5271 && globals->root.splt != NULL
5272 && hash != NULL
5273 && hash->root.plt.offset != (bfd_vma) -1)
5274 {
5275 sym_sec = globals->root.splt;
5276 sym_value = hash->root.plt.offset;
5277 if (sym_sec->output_section != NULL)
5278 destination = (sym_value
5279 + sym_sec->output_offset
5280 + sym_sec->output_section->vma);
5281 }
5282 else
5283 continue;
5284 }
5285 else
5286 {
5287 bfd_set_error (bfd_error_bad_value);
5288 goto error_ret_free_internal;
5289 }
5290 st_type = hash->root.type;
5291 branch_type = hash->root.target_internal;
5292 sym_name = hash->root.root.root.string;
5293 }
5294
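	      /* The do { ... } while (0) below is not a loop: it only provides a
		 block that "break" can exit early, either when no stub is needed
		 or when a suitable stub has already been created.  */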
5295 do
5296 {
5297 /* Determine what (if any) linker stub is needed. */
5298 stub_type = arm_type_of_stub (info, section, irela,
5299 st_type, &branch_type,
5300 hash, destination, sym_sec,
5301 input_bfd, sym_name);
5302 if (stub_type == arm_stub_none)
5303 break;
5304
5305 /* Support for grouping stub sections. */
5306 id_sec = htab->stub_group[section->id].link_sec;
5307
5308 /* Get the name of this stub. */
5309 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5310 irela, stub_type);
5311 if (!stub_name)
5312 goto error_ret_free_internal;
5313
5314 /* We've either created a stub for this reloc already,
5315 or we are about to. */
5316 created_stub = TRUE;
5317
5318 stub_entry = arm_stub_hash_lookup
5319 (&htab->stub_hash_table, stub_name,
5320 FALSE, FALSE);
5321 if (stub_entry != NULL)
5322 {
5323 /* The proper stub has already been created. */
5324 free (stub_name);
5325 stub_entry->target_value = sym_value;
5326 break;
5327 }
5328
5329 stub_entry = elf32_arm_add_stub (stub_name, section,
5330 htab);
5331 if (stub_entry == NULL)
5332 {
5333 free (stub_name);
5334 goto error_ret_free_internal;
5335 }
5336
5337 stub_entry->target_value = sym_value;
5338 stub_entry->target_section = sym_sec;
5339 stub_entry->stub_type = stub_type;
5340 stub_entry->h = hash;
5341 stub_entry->branch_type = branch_type;
5342
5343 if (sym_name == NULL)
5344 sym_name = "unnamed";
5345 stub_entry->output_name = (char *)
5346 bfd_alloc (htab->stub_bfd,
5347 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5348 + strlen (sym_name));
5349 if (stub_entry->output_name == NULL)
5350 {
5351 free (stub_name);
5352 goto error_ret_free_internal;
5353 }
5354
5355 /* For historical reasons, use the existing names for
5356 ARM-to-Thumb and Thumb-to-ARM stubs. */
5357 if ((r_type == (unsigned int) R_ARM_THM_CALL
5358 || r_type == (unsigned int) R_ARM_THM_JUMP24
5359 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5360 && branch_type == ST_BRANCH_TO_ARM)
5361 sprintf (stub_entry->output_name,
5362 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5363 else if ((r_type == (unsigned int) R_ARM_CALL
5364 || r_type == (unsigned int) R_ARM_JUMP24)
5365 && branch_type == ST_BRANCH_TO_THUMB)
5366 sprintf (stub_entry->output_name,
5367 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5368 else
5369 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5370 sym_name);
5371
5372 stub_changed = TRUE;
5373 }
5374 while (0);
5375
 5376 	      /* Look for relocations which might trigger the Cortex-A8
 5377 		 erratum.  */
5378 if (htab->fix_cortex_a8
5379 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5380 || r_type == (unsigned int) R_ARM_THM_JUMP19
5381 || r_type == (unsigned int) R_ARM_THM_CALL
5382 || r_type == (unsigned int) R_ARM_THM_XPC22))
5383 {
5384 bfd_vma from = section->output_section->vma
5385 + section->output_offset
5386 + irela->r_offset;
5387
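		  /* The erratum can only involve a 32-bit Thumb-2 branch that
		     spans a 4KB page boundary, i.e. one whose first halfword
		     occupies the last two bytes of a page, hence the check for
		     an offset of 0xffe.  */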
5388 if ((from & 0xfff) == 0xffe)
5389 {
5390 /* Found a candidate. Note we haven't checked the
5391 destination is within 4K here: if we do so (and
5392 don't create an entry in a8_relocs) we can't tell
5393 that a branch should have been relocated when
5394 scanning later. */
5395 if (num_a8_relocs == a8_reloc_table_size)
5396 {
5397 a8_reloc_table_size *= 2;
5398 a8_relocs = (struct a8_erratum_reloc *)
5399 bfd_realloc (a8_relocs,
5400 sizeof (struct a8_erratum_reloc)
5401 * a8_reloc_table_size);
5402 }
5403
5404 a8_relocs[num_a8_relocs].from = from;
5405 a8_relocs[num_a8_relocs].destination = destination;
5406 a8_relocs[num_a8_relocs].r_type = r_type;
5407 a8_relocs[num_a8_relocs].branch_type = branch_type;
5408 a8_relocs[num_a8_relocs].sym_name = sym_name;
5409 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5410 a8_relocs[num_a8_relocs].hash = hash;
5411
5412 num_a8_relocs++;
5413 }
5414 }
5415 }
5416
5417 /* We're done with the internal relocs, free them. */
5418 if (elf_section_data (section)->relocs == NULL)
5419 free (internal_relocs);
5420 }
5421
5422 if (htab->fix_cortex_a8)
5423 {
 5424 	  /* Sort relocs which might apply to the Cortex-A8 erratum.  */
5425 qsort (a8_relocs, num_a8_relocs,
5426 sizeof (struct a8_erratum_reloc),
5427 &a8_reloc_compare);
5428
 5429 	  /* Scan for branches which might trigger the Cortex-A8 erratum.  */
5430 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5431 &num_a8_fixes, &a8_fix_table_size,
5432 a8_relocs, num_a8_relocs,
5433 prev_num_a8_fixes, &stub_changed)
5434 != 0)
5435 goto error_ret_free_local;
5436 }
5437 }
5438
5439 if (prev_num_a8_fixes != num_a8_fixes)
5440 stub_changed = TRUE;
5441
5442 if (!stub_changed)
5443 break;
5444
5445 /* OK, we've added some stubs. Find out the new size of the
5446 stub sections. */
5447 for (stub_sec = htab->stub_bfd->sections;
5448 stub_sec != NULL;
5449 stub_sec = stub_sec->next)
5450 {
5451 /* Ignore non-stub sections. */
5452 if (!strstr (stub_sec->name, STUB_SUFFIX))
5453 continue;
5454
5455 stub_sec->size = 0;
5456 }
5457
5458 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5459
5460 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5461 if (htab->fix_cortex_a8)
5462 for (i = 0; i < num_a8_fixes; i++)
5463 {
5464 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5465 a8_fixes[i].section, htab);
5466
5467 if (stub_sec == NULL)
5468 goto error_ret_free_local;
5469
5470 stub_sec->size
5471 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5472 NULL);
5473 }
5474
5475
5476 /* Ask the linker to do its stuff. */
5477 (*htab->layout_sections_again) ();
5478 }
5479
5480 /* Add stubs for Cortex-A8 erratum fixes now. */
5481 if (htab->fix_cortex_a8)
5482 {
5483 for (i = 0; i < num_a8_fixes; i++)
5484 {
5485 struct elf32_arm_stub_hash_entry *stub_entry;
5486 char *stub_name = a8_fixes[i].stub_name;
5487 asection *section = a8_fixes[i].section;
5488 unsigned int section_id = a8_fixes[i].section->id;
5489 asection *link_sec = htab->stub_group[section_id].link_sec;
5490 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5491 const insn_sequence *template_sequence;
5492 int template_size, size = 0;
5493
5494 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5495 TRUE, FALSE);
5496 if (stub_entry == NULL)
5497 {
 5498 	      (*_bfd_error_handler) (_("%B: cannot create stub entry %s"),
5499 section->owner,
5500 stub_name);
5501 return FALSE;
5502 }
5503
5504 stub_entry->stub_sec = stub_sec;
5505 stub_entry->stub_offset = 0;
5506 stub_entry->id_sec = link_sec;
5507 stub_entry->stub_type = a8_fixes[i].stub_type;
5508 stub_entry->target_section = a8_fixes[i].section;
5509 stub_entry->target_value = a8_fixes[i].offset;
5510 stub_entry->target_addend = a8_fixes[i].addend;
5511 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5512 stub_entry->branch_type = a8_fixes[i].branch_type;
5513
5514 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5515 &template_sequence,
5516 &template_size);
5517
5518 stub_entry->stub_size = size;
5519 stub_entry->stub_template = template_sequence;
5520 stub_entry->stub_template_size = template_size;
5521 }
5522
5523 /* Stash the Cortex-A8 erratum fix array for use later in
5524 elf32_arm_write_section(). */
5525 htab->a8_erratum_fixes = a8_fixes;
5526 htab->num_a8_erratum_fixes = num_a8_fixes;
5527 }
5528 else
5529 {
5530 htab->a8_erratum_fixes = NULL;
5531 htab->num_a8_erratum_fixes = 0;
5532 }
5533 return TRUE;
5534
5535 error_ret_free_local:
5536 return FALSE;
5537 }
5538
5539 /* Build all the stubs associated with the current output file. The
5540 stubs are kept in a hash table attached to the main linker hash
5541 table. We also set up the .plt entries for statically linked PIC
5542 functions here. This function is called via arm_elf_finish in the
5543 linker. */
5544
5545 bfd_boolean
5546 elf32_arm_build_stubs (struct bfd_link_info *info)
5547 {
5548 asection *stub_sec;
5549 struct bfd_hash_table *table;
5550 struct elf32_arm_link_hash_table *htab;
5551
5552 htab = elf32_arm_hash_table (info);
5553 if (htab == NULL)
5554 return FALSE;
5555
5556 for (stub_sec = htab->stub_bfd->sections;
5557 stub_sec != NULL;
5558 stub_sec = stub_sec->next)
5559 {
5560 bfd_size_type size;
5561
5562 /* Ignore non-stub sections. */
5563 if (!strstr (stub_sec->name, STUB_SUFFIX))
5564 continue;
5565
5566 /* Allocate memory to hold the linker stubs. */
5567 size = stub_sec->size;
5568 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5569 if (stub_sec->contents == NULL && size != 0)
5570 return FALSE;
5571 stub_sec->size = 0;
5572 }
5573
5574 /* Build the stubs as directed by the stub hash table. */
5575 table = &htab->stub_hash_table;
5576 bfd_hash_traverse (table, arm_build_one_stub, info);
5577 if (htab->fix_cortex_a8)
5578 {
 5579       /* Place the Cortex-A8 stubs last.  */
5580 htab->fix_cortex_a8 = -1;
5581 bfd_hash_traverse (table, arm_build_one_stub, info);
5582 }
5583
5584 return TRUE;
5585 }
5586
5587 /* Locate the Thumb encoded calling stub for NAME. */
5588
5589 static struct elf_link_hash_entry *
5590 find_thumb_glue (struct bfd_link_info *link_info,
5591 const char *name,
5592 char **error_message)
5593 {
5594 char *tmp_name;
5595 struct elf_link_hash_entry *hash;
5596 struct elf32_arm_link_hash_table *hash_table;
5597
5598 /* We need a pointer to the armelf specific hash table. */
5599 hash_table = elf32_arm_hash_table (link_info);
5600 if (hash_table == NULL)
5601 return NULL;
5602
5603 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5604 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5605
5606 BFD_ASSERT (tmp_name);
5607
5608 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5609
5610 hash = elf_link_hash_lookup
5611 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5612
5613 if (hash == NULL
5614 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5615 tmp_name, name) == -1)
5616 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5617
5618 free (tmp_name);
5619
5620 return hash;
5621 }
5622
5623 /* Locate the ARM encoded calling stub for NAME. */
5624
5625 static struct elf_link_hash_entry *
5626 find_arm_glue (struct bfd_link_info *link_info,
5627 const char *name,
5628 char **error_message)
5629 {
5630 char *tmp_name;
5631 struct elf_link_hash_entry *myh;
5632 struct elf32_arm_link_hash_table *hash_table;
5633
5634 /* We need a pointer to the elfarm specific hash table. */
5635 hash_table = elf32_arm_hash_table (link_info);
5636 if (hash_table == NULL)
5637 return NULL;
5638
5639 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5640 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5641
5642 BFD_ASSERT (tmp_name);
5643
5644 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5645
5646 myh = elf_link_hash_lookup
5647 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5648
5649 if (myh == NULL
5650 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5651 tmp_name, name) == -1)
5652 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5653
5654 free (tmp_name);
5655
5656 return myh;
5657 }
5658
5659 /* ARM->Thumb glue (static images):
5660
5661 .arm
5662 __func_from_arm:
5663 ldr r12, __func_addr
5664 bx r12
5665 __func_addr:
5666 .word func @ behave as if you saw a ARM_32 reloc.
5667
5668 (v5t static images)
5669 .arm
5670 __func_from_arm:
5671 ldr pc, __func_addr
5672 __func_addr:
5673 .word func @ behave as if you saw a ARM_32 reloc.
5674
5675 (relocatable images)
5676 .arm
5677 __func_from_arm:
5678 ldr r12, __func_offset
5679 add r12, r12, pc
5680 bx r12
5681 __func_offset:
5682 .word func - . */
5683
5684 #define ARM2THUMB_STATIC_GLUE_SIZE 12
5685 static const insn32 a2t1_ldr_insn = 0xe59fc000;
5686 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
5687 static const insn32 a2t3_func_addr_insn = 0x00000001;
5688
5689 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
5690 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5691 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5692
5693 #define ARM2THUMB_PIC_GLUE_SIZE 16
5694 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5695 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5696 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5697
5698 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5699
5700 .thumb .thumb
5701 .align 2 .align 2
5702 __func_from_thumb: __func_from_thumb:
5703 bx pc push {r6, lr}
5704 nop ldr r6, __func_addr
5705 .arm mov lr, pc
5706 b func bx r6
5707 .arm
5708 ;; back_to_thumb
5709 ldmia r13! {r6, lr}
5710 bx lr
5711 __func_addr:
5712 .word func */
5713
5714 #define THUMB2ARM_GLUE_SIZE 8
5715 static const insn16 t2a1_bx_pc_insn = 0x4778;
5716 static const insn16 t2a2_noop_insn = 0x46c0;
5717 static const insn32 t2a3_b_insn = 0xea000000;
5718
5719 #define VFP11_ERRATUM_VENEER_SIZE 8
5720
5721 #define ARM_BX_VENEER_SIZE 12
5722 static const insn32 armbx1_tst_insn = 0xe3100001;
5723 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5724 static const insn32 armbx3_bx_insn = 0xe12fff10;
5725
5726 #ifndef ELFARM_NABI_C_INCLUDED
5727 static void
5728 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5729 {
5730 asection * s;
5731 bfd_byte * contents;
5732
5733 if (size == 0)
5734 {
5735 /* Do not include empty glue sections in the output. */
5736 if (abfd != NULL)
5737 {
5738 s = bfd_get_linker_section (abfd, name);
5739 if (s != NULL)
5740 s->flags |= SEC_EXCLUDE;
5741 }
5742 return;
5743 }
5744
5745 BFD_ASSERT (abfd != NULL);
5746
5747 s = bfd_get_linker_section (abfd, name);
5748 BFD_ASSERT (s != NULL);
5749
5750 contents = (bfd_byte *) bfd_alloc (abfd, size);
5751
5752 BFD_ASSERT (s->size == size);
5753 s->contents = contents;
5754 }
5755
5756 bfd_boolean
5757 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5758 {
5759 struct elf32_arm_link_hash_table * globals;
5760
5761 globals = elf32_arm_hash_table (info);
5762 BFD_ASSERT (globals != NULL);
5763
5764 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5765 globals->arm_glue_size,
5766 ARM2THUMB_GLUE_SECTION_NAME);
5767
5768 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5769 globals->thumb_glue_size,
5770 THUMB2ARM_GLUE_SECTION_NAME);
5771
5772 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5773 globals->vfp11_erratum_glue_size,
5774 VFP11_ERRATUM_VENEER_SECTION_NAME);
5775
5776 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5777 globals->bx_glue_size,
5778 ARM_BX_GLUE_SECTION_NAME);
5779
5780 return TRUE;
5781 }
5782
 5783 /* Allocate space and symbols for calling a Thumb function from ARM mode.
 5784    Returns the symbol identifying the stub.  */
5785
5786 static struct elf_link_hash_entry *
5787 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5788 struct elf_link_hash_entry * h)
5789 {
5790 const char * name = h->root.root.string;
5791 asection * s;
5792 char * tmp_name;
5793 struct elf_link_hash_entry * myh;
5794 struct bfd_link_hash_entry * bh;
5795 struct elf32_arm_link_hash_table * globals;
5796 bfd_vma val;
5797 bfd_size_type size;
5798
5799 globals = elf32_arm_hash_table (link_info);
5800 BFD_ASSERT (globals != NULL);
5801 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5802
5803 s = bfd_get_linker_section
5804 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5805
5806 BFD_ASSERT (s != NULL);
5807
5808 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5809 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5810
5811 BFD_ASSERT (tmp_name);
5812
5813 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5814
5815 myh = elf_link_hash_lookup
5816 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5817
5818 if (myh != NULL)
5819 {
5820 /* We've already seen this guy. */
5821 free (tmp_name);
5822 return myh;
5823 }
5824
5825 /* The only trick here is using hash_table->arm_glue_size as the value.
5826 Even though the section isn't allocated yet, this is where we will be
5827 putting it. The +1 on the value marks that the stub has not been
5828 output yet - not that it is a Thumb function. */
5829 bh = NULL;
5830 val = globals->arm_glue_size + 1;
5831 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5832 tmp_name, BSF_GLOBAL, s, val,
5833 NULL, TRUE, FALSE, &bh);
5834
5835 myh = (struct elf_link_hash_entry *) bh;
5836 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5837 myh->forced_local = 1;
5838
5839 free (tmp_name);
5840
5841 if (link_info->shared || globals->root.is_relocatable_executable
5842 || globals->pic_veneer)
5843 size = ARM2THUMB_PIC_GLUE_SIZE;
5844 else if (globals->use_blx)
5845 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5846 else
5847 size = ARM2THUMB_STATIC_GLUE_SIZE;
5848
5849 s->size += size;
5850 globals->arm_glue_size += size;
5851
5852 return myh;
5853 }
5854
5855 /* Allocate space for ARMv4 BX veneers. */
5856
5857 static void
5858 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5859 {
5860 asection * s;
5861 struct elf32_arm_link_hash_table *globals;
5862 char *tmp_name;
5863 struct elf_link_hash_entry *myh;
5864 struct bfd_link_hash_entry *bh;
5865 bfd_vma val;
5866
5867 /* BX PC does not need a veneer. */
5868 if (reg == 15)
5869 return;
5870
5871 globals = elf32_arm_hash_table (link_info);
5872 BFD_ASSERT (globals != NULL);
5873 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5874
5875 /* Check if this veneer has already been allocated. */
5876 if (globals->bx_glue_offset[reg])
5877 return;
5878
5879 s = bfd_get_linker_section
5880 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5881
5882 BFD_ASSERT (s != NULL);
5883
5884 /* Add symbol for veneer. */
5885 tmp_name = (char *)
5886 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5887
5888 BFD_ASSERT (tmp_name);
5889
5890 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5891
5892 myh = elf_link_hash_lookup
5893 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5894
5895 BFD_ASSERT (myh == NULL);
5896
5897 bh = NULL;
5898 val = globals->bx_glue_size;
5899 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5900 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5901 NULL, TRUE, FALSE, &bh);
5902
5903 myh = (struct elf_link_hash_entry *) bh;
5904 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5905 myh->forced_local = 1;
5906
5907 s->size += ARM_BX_VENEER_SIZE;
5908 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5909 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5910 }
5911
5912
5913 /* Add an entry to the code/data map for section SEC. */
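/* TYPE is the mapping symbol class defined by the ARM ELF ABI: 'a' for
   ARM code, 't' for Thumb code and 'd' for data, corresponding to the
   $a, $t and $d mapping symbols.  */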
5914
5915 static void
5916 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5917 {
5918 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5919 unsigned int newidx;
5920
5921 if (sec_data->map == NULL)
5922 {
5923 sec_data->map = (elf32_arm_section_map *)
5924 bfd_malloc (sizeof (elf32_arm_section_map));
5925 sec_data->mapcount = 0;
5926 sec_data->mapsize = 1;
5927 }
5928
5929 newidx = sec_data->mapcount++;
5930
5931 if (sec_data->mapcount > sec_data->mapsize)
5932 {
5933 sec_data->mapsize *= 2;
5934 sec_data->map = (elf32_arm_section_map *)
5935 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5936 * sizeof (elf32_arm_section_map));
5937 }
5938
5939 if (sec_data->map)
5940 {
5941 sec_data->map[newidx].vma = vma;
5942 sec_data->map[newidx].type = type;
5943 }
5944 }
5945
5946
5947 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5948 veneers are handled for now. */
5949
5950 static bfd_vma
5951 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5952 elf32_vfp11_erratum_list *branch,
5953 bfd *branch_bfd,
5954 asection *branch_sec,
5955 unsigned int offset)
5956 {
5957 asection *s;
5958 struct elf32_arm_link_hash_table *hash_table;
5959 char *tmp_name;
5960 struct elf_link_hash_entry *myh;
5961 struct bfd_link_hash_entry *bh;
5962 bfd_vma val;
5963 struct _arm_elf_section_data *sec_data;
5964 elf32_vfp11_erratum_list *newerr;
5965
5966 hash_table = elf32_arm_hash_table (link_info);
5967 BFD_ASSERT (hash_table != NULL);
5968 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5969
5970 s = bfd_get_linker_section
5971 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5972
 5973   BFD_ASSERT (s != NULL);
 5974
 5975   sec_data = elf32_arm_section_data (s);
5976
5977 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5978 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5979
5980 BFD_ASSERT (tmp_name);
5981
5982 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5983 hash_table->num_vfp11_fixes);
5984
5985 myh = elf_link_hash_lookup
5986 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5987
5988 BFD_ASSERT (myh == NULL);
5989
5990 bh = NULL;
5991 val = hash_table->vfp11_erratum_glue_size;
5992 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5993 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5994 NULL, TRUE, FALSE, &bh);
5995
5996 myh = (struct elf_link_hash_entry *) bh;
5997 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5998 myh->forced_local = 1;
5999
6000 /* Link veneer back to calling location. */
6001 sec_data->erratumcount += 1;
6002 newerr = (elf32_vfp11_erratum_list *)
6003 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6004
6005 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6006 newerr->vma = -1;
6007 newerr->u.v.branch = branch;
6008 newerr->u.v.id = hash_table->num_vfp11_fixes;
6009 branch->u.b.veneer = newerr;
6010
6011 newerr->next = sec_data->erratumlist;
6012 sec_data->erratumlist = newerr;
6013
6014 /* A symbol for the return from the veneer. */
6015 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6016 hash_table->num_vfp11_fixes);
6017
6018 myh = elf_link_hash_lookup
6019 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6020
6021 if (myh != NULL)
6022 abort ();
6023
6024 bh = NULL;
6025 val = offset + 4;
6026 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6027 branch_sec, val, NULL, TRUE, FALSE, &bh);
6028
6029 myh = (struct elf_link_hash_entry *) bh;
6030 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6031 myh->forced_local = 1;
6032
6033 free (tmp_name);
6034
6035 /* Generate a mapping symbol for the veneer section, and explicitly add an
6036 entry for that symbol to the code/data map for the section. */
6037 if (hash_table->vfp11_erratum_glue_size == 0)
6038 {
6039 bh = NULL;
6040 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6041 ever requires this erratum fix. */
6042 _bfd_generic_link_add_one_symbol (link_info,
6043 hash_table->bfd_of_glue_owner, "$a",
6044 BSF_LOCAL, s, 0, NULL,
6045 TRUE, FALSE, &bh);
6046
6047 myh = (struct elf_link_hash_entry *) bh;
6048 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6049 myh->forced_local = 1;
6050
6051 /* The elf32_arm_init_maps function only cares about symbols from input
6052 BFDs. We must make a note of this generated mapping symbol
6053 ourselves so that code byteswapping works properly in
6054 elf32_arm_write_section. */
6055 elf32_arm_section_map_add (s, 'a', 0);
6056 }
6057
6058 s->size += VFP11_ERRATUM_VENEER_SIZE;
6059 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6060 hash_table->num_vfp11_fixes++;
6061
6062 /* The offset of the veneer. */
6063 return val;
6064 }
6065
6066 #define ARM_GLUE_SECTION_FLAGS \
6067 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6068 | SEC_READONLY | SEC_LINKER_CREATED)
6069
6070 /* Create a fake section for use by the ARM backend of the linker. */
6071
6072 static bfd_boolean
6073 arm_make_glue_section (bfd * abfd, const char * name)
6074 {
6075 asection * sec;
6076
6077 sec = bfd_get_linker_section (abfd, name);
6078 if (sec != NULL)
6079 /* Already made. */
6080 return TRUE;
6081
6082 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6083
6084 if (sec == NULL
6085 || !bfd_set_section_alignment (abfd, sec, 2))
6086 return FALSE;
6087
6088 /* Set the gc mark to prevent the section from being removed by garbage
6089 collection, despite the fact that no relocs refer to this section. */
6090 sec->gc_mark = 1;
6091
6092 return TRUE;
6093 }
6094
6095 /* Set size of .plt entries. This function is called from the
6096 linker scripts in ld/emultempl/{armelf}.em. */
6097
6098 void
6099 bfd_elf32_arm_use_long_plt (void)
6100 {
6101 elf32_arm_use_long_plt_entry = TRUE;
6102 }
6103
6104 /* Add the glue sections to ABFD. This function is called from the
6105 linker scripts in ld/emultempl/{armelf}.em. */
6106
6107 bfd_boolean
6108 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6109 struct bfd_link_info *info)
6110 {
6111 /* If we are only performing a partial
6112 link do not bother adding the glue. */
6113 if (info->relocatable)
6114 return TRUE;
6115
6116 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6117 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6118 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6119 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6120 }
6121
6122 /* Select a BFD to be used to hold the sections used by the glue code.
6123 This function is called from the linker scripts in ld/emultempl/
6124 {armelf/pe}.em. */
6125
6126 bfd_boolean
6127 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6128 {
6129 struct elf32_arm_link_hash_table *globals;
6130
6131 /* If we are only performing a partial link
6132 do not bother getting a bfd to hold the glue. */
6133 if (info->relocatable)
6134 return TRUE;
6135
6136 /* Make sure we don't attach the glue sections to a dynamic object. */
6137 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6138
6139 globals = elf32_arm_hash_table (info);
6140 BFD_ASSERT (globals != NULL);
6141
6142 if (globals->bfd_of_glue_owner != NULL)
6143 return TRUE;
6144
6145 /* Save the bfd for later use. */
6146 globals->bfd_of_glue_owner = abfd;
6147
6148 return TRUE;
6149 }
6150
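/* Decide whether BLX may be used for interworking: any architecture
   newer than ARMv4T normally qualifies, but when the ARM1176 workaround
   is enabled only ARMv6T2, or architectures above ARMv6K, qualify.  */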
6151 static void
6152 check_use_blx (struct elf32_arm_link_hash_table *globals)
6153 {
6154 int cpu_arch;
6155
6156 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6157 Tag_CPU_arch);
6158
6159 if (globals->fix_arm1176)
6160 {
6161 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6162 globals->use_blx = 1;
6163 }
6164 else
6165 {
6166 if (cpu_arch > TAG_CPU_ARCH_V4T)
6167 globals->use_blx = 1;
6168 }
6169 }
6170
6171 bfd_boolean
6172 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6173 struct bfd_link_info *link_info)
6174 {
6175 Elf_Internal_Shdr *symtab_hdr;
6176 Elf_Internal_Rela *internal_relocs = NULL;
6177 Elf_Internal_Rela *irel, *irelend;
6178 bfd_byte *contents = NULL;
6179
6180 asection *sec;
6181 struct elf32_arm_link_hash_table *globals;
6182
6183 /* If we are only performing a partial link do not bother
6184 to construct any glue. */
6185 if (link_info->relocatable)
6186 return TRUE;
6187
 6188   /* Here we have a bfd that is to be included in the link.  We have a
 6189      hook to do reloc rummaging, before section sizes are nailed down.  */
6190 globals = elf32_arm_hash_table (link_info);
6191 BFD_ASSERT (globals != NULL);
6192
6193 check_use_blx (globals);
6194
6195 if (globals->byteswap_code && !bfd_big_endian (abfd))
6196 {
6197 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6198 abfd);
6199 return FALSE;
6200 }
6201
6202 /* PR 5398: If we have not decided to include any loadable sections in
6203 the output then we will not have a glue owner bfd. This is OK, it
6204 just means that there is nothing else for us to do here. */
6205 if (globals->bfd_of_glue_owner == NULL)
6206 return TRUE;
6207
6208 /* Rummage around all the relocs and map the glue vectors. */
6209 sec = abfd->sections;
6210
6211 if (sec == NULL)
6212 return TRUE;
6213
6214 for (; sec != NULL; sec = sec->next)
6215 {
6216 if (sec->reloc_count == 0)
6217 continue;
6218
6219 if ((sec->flags & SEC_EXCLUDE) != 0)
6220 continue;
6221
6222 symtab_hdr = & elf_symtab_hdr (abfd);
6223
6224 /* Load the relocs. */
6225 internal_relocs
6226 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6227
6228 if (internal_relocs == NULL)
6229 goto error_return;
6230
6231 irelend = internal_relocs + sec->reloc_count;
6232 for (irel = internal_relocs; irel < irelend; irel++)
6233 {
6234 long r_type;
6235 unsigned long r_index;
6236
6237 struct elf_link_hash_entry *h;
6238
6239 r_type = ELF32_R_TYPE (irel->r_info);
6240 r_index = ELF32_R_SYM (irel->r_info);
6241
6242 /* These are the only relocation types we care about. */
6243 if ( r_type != R_ARM_PC24
6244 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6245 continue;
6246
6247 /* Get the section contents if we haven't done so already. */
6248 if (contents == NULL)
6249 {
6250 /* Get cached copy if it exists. */
6251 if (elf_section_data (sec)->this_hdr.contents != NULL)
6252 contents = elf_section_data (sec)->this_hdr.contents;
6253 else
6254 {
6255 /* Go get them off disk. */
6256 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6257 goto error_return;
6258 }
6259 }
6260
6261 if (r_type == R_ARM_V4BX)
6262 {
6263 int reg;
6264
6265 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6266 record_arm_bx_glue (link_info, reg);
6267 continue;
6268 }
6269
6270 /* If the relocation is not against a symbol it cannot concern us. */
6271 h = NULL;
6272
6273 /* We don't care about local symbols. */
6274 if (r_index < symtab_hdr->sh_info)
6275 continue;
6276
6277 /* This is an external symbol. */
6278 r_index -= symtab_hdr->sh_info;
6279 h = (struct elf_link_hash_entry *)
6280 elf_sym_hashes (abfd)[r_index];
6281
6282 /* If the relocation is against a static symbol it must be within
6283 the current section and so cannot be a cross ARM/Thumb relocation. */
6284 if (h == NULL)
6285 continue;
6286
6287 /* If the call will go through a PLT entry then we do not need
6288 glue. */
6289 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6290 continue;
6291
6292 switch (r_type)
6293 {
6294 case R_ARM_PC24:
6295 /* This one is a call from arm code. We need to look up
6296 the target of the call. If it is a thumb target, we
6297 insert glue. */
6298 if (h->target_internal == ST_BRANCH_TO_THUMB)
6299 record_arm_to_thumb_glue (link_info, h);
6300 break;
6301
6302 default:
6303 abort ();
6304 }
6305 }
6306
6307 if (contents != NULL
6308 && elf_section_data (sec)->this_hdr.contents != contents)
6309 free (contents);
6310 contents = NULL;
6311
6312 if (internal_relocs != NULL
6313 && elf_section_data (sec)->relocs != internal_relocs)
6314 free (internal_relocs);
6315 internal_relocs = NULL;
6316 }
6317
6318 return TRUE;
6319
6320 error_return:
6321 if (contents != NULL
6322 && elf_section_data (sec)->this_hdr.contents != contents)
6323 free (contents);
6324 if (internal_relocs != NULL
6325 && elf_section_data (sec)->relocs != internal_relocs)
6326 free (internal_relocs);
6327
6328 return FALSE;
6329 }
6330 #endif
6331
6332
6333 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6334
6335 void
6336 bfd_elf32_arm_init_maps (bfd *abfd)
6337 {
6338 Elf_Internal_Sym *isymbuf;
6339 Elf_Internal_Shdr *hdr;
6340 unsigned int i, localsyms;
6341
6342 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6343 if (! is_arm_elf (abfd))
6344 return;
6345
6346 if ((abfd->flags & DYNAMIC) != 0)
6347 return;
6348
6349 hdr = & elf_symtab_hdr (abfd);
6350 localsyms = hdr->sh_info;
6351
6352 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6353 should contain the number of local symbols, which should come before any
6354 global symbols. Mapping symbols are always local. */
6355 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6356 NULL);
6357
6358 /* No internal symbols read? Skip this BFD. */
6359 if (isymbuf == NULL)
6360 return;
6361
6362 for (i = 0; i < localsyms; i++)
6363 {
6364 Elf_Internal_Sym *isym = &isymbuf[i];
6365 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6366 const char *name;
6367
6368 if (sec != NULL
6369 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6370 {
6371 name = bfd_elf_string_from_elf_section (abfd,
6372 hdr->sh_link, isym->st_name);
6373
6374 if (bfd_is_arm_special_symbol_name (name,
6375 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6376 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6377 }
6378 }
6379 }
6380
6381
6382 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6383 say what they wanted. */
6384
6385 void
6386 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6387 {
6388 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6389 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6390
6391 if (globals == NULL)
6392 return;
6393
6394 if (globals->fix_cortex_a8 == -1)
6395 {
6396 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6397 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6398 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6399 || out_attr[Tag_CPU_arch_profile].i == 0))
6400 globals->fix_cortex_a8 = 1;
6401 else
6402 globals->fix_cortex_a8 = 0;
6403 }
6404 }
6405
6406
6407 void
6408 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6409 {
6410 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6411 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6412
6413 if (globals == NULL)
6414 return;
6415 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6416 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6417 {
6418 switch (globals->vfp11_fix)
6419 {
6420 case BFD_ARM_VFP11_FIX_DEFAULT:
6421 case BFD_ARM_VFP11_FIX_NONE:
6422 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6423 break;
6424
6425 default:
6426 /* Give a warning, but do as the user requests anyway. */
6427 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6428 "workaround is not necessary for target architecture"), obfd);
6429 }
6430 }
6431 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6432 /* For earlier architectures, we might need the workaround, but do not
 6433        enable it by default.  If the user is running with broken hardware, they
6434 must enable the erratum fix explicitly. */
6435 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6436 }
6437
6438
6439 enum bfd_arm_vfp11_pipe
6440 {
6441 VFP11_FMAC,
6442 VFP11_LS,
6443 VFP11_DS,
6444 VFP11_BAD
6445 };
6446
6447 /* Return a VFP register number. This is encoded as RX:X for single-precision
6448 registers, or X:RX for double-precision registers, where RX is the group of
6449 four bits in the instruction encoding and X is the single extension bit.
6450 RX and X fields are specified using their lowest (starting) bit. The return
6451 value is:
6452
6453 0...31: single-precision registers s0...s31
6454 32...63: double-precision registers d0...d31.
6455
6456 Although X should be zero for VFP11 (encoding d0...d15 only), we might
6457 encounter VFP3 instructions, so we allow the full range for DP registers. */
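/* For example, with RX at bit 0 and X at bit 5, an SP operand whose RX
   field is 0x3 and whose X bit is 1 encodes s7 ((3 << 1) | 1), while
   the same fields in a DP operand encode d19 ((3 | (1 << 4)) + 32).  */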
6458
6459 static unsigned int
6460 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
6461 unsigned int x)
6462 {
6463 if (is_double)
6464 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
6465 else
6466 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
6467 }
6468
6469 /* Set bits in *WMASK according to a register number REG as encoded by
6470 bfd_arm_vfp11_regno(). Ignore d16-d31. */
6471
6472 static void
6473 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
6474 {
6475 if (reg < 32)
6476 *wmask |= 1 << reg;
6477 else if (reg < 48)
6478 *wmask |= 3 << ((reg - 32) * 2);
6479 }
6480
6481 /* Return TRUE if WMASK overwrites anything in REGS. */
6482
6483 static bfd_boolean
6484 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
6485 {
6486 int i;
6487
6488 for (i = 0; i < numregs; i++)
6489 {
6490 unsigned int reg = regs[i];
6491
6492 if (reg < 32 && (wmask & (1 << reg)) != 0)
6493 return TRUE;
6494
6495 reg -= 32;
6496
6497 if (reg >= 16)
6498 continue;
6499
6500 if ((wmask & (3 << (reg * 2))) != 0)
6501 return TRUE;
6502 }
6503
6504 return FALSE;
6505 }
6506
6507 /* In this function, we're interested in two things: finding input registers
6508 for VFP data-processing instructions, and finding the set of registers which
6509 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
6510 hold the written set, so FLDM etc. are easy to deal with (we're only
 6511    interested in 32 SP registers or 16 DP registers, due to the VFP version
 6512    implemented by the chip in question).  DP registers are marked by setting
 6513    both SP registers in the write mask.  */
6514
6515 static enum bfd_arm_vfp11_pipe
6516 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
6517 int *numregs)
6518 {
6519 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
6520 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
6521
6522 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
6523 {
6524 unsigned int pqrs;
6525 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6526 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6527
6528 pqrs = ((insn & 0x00800000) >> 20)
6529 | ((insn & 0x00300000) >> 19)
6530 | ((insn & 0x00000040) >> 6);
6531
6532 switch (pqrs)
6533 {
6534 case 0: /* fmac[sd]. */
6535 case 1: /* fnmac[sd]. */
6536 case 2: /* fmsc[sd]. */
6537 case 3: /* fnmsc[sd]. */
6538 vpipe = VFP11_FMAC;
6539 bfd_arm_vfp11_write_mask (destmask, fd);
6540 regs[0] = fd;
6541 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6542 regs[2] = fm;
6543 *numregs = 3;
6544 break;
6545
6546 case 4: /* fmul[sd]. */
6547 case 5: /* fnmul[sd]. */
6548 case 6: /* fadd[sd]. */
6549 case 7: /* fsub[sd]. */
6550 vpipe = VFP11_FMAC;
6551 goto vfp_binop;
6552
6553 case 8: /* fdiv[sd]. */
6554 vpipe = VFP11_DS;
6555 vfp_binop:
6556 bfd_arm_vfp11_write_mask (destmask, fd);
6557 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
6558 regs[1] = fm;
6559 *numregs = 2;
6560 break;
6561
6562 case 15: /* extended opcode. */
6563 {
6564 unsigned int extn = ((insn >> 15) & 0x1e)
6565 | ((insn >> 7) & 1);
6566
6567 switch (extn)
6568 {
6569 case 0: /* fcpy[sd]. */
6570 case 1: /* fabs[sd]. */
6571 case 2: /* fneg[sd]. */
6572 case 8: /* fcmp[sd]. */
6573 case 9: /* fcmpe[sd]. */
6574 case 10: /* fcmpz[sd]. */
6575 case 11: /* fcmpez[sd]. */
6576 case 16: /* fuito[sd]. */
6577 case 17: /* fsito[sd]. */
6578 case 24: /* ftoui[sd]. */
6579 case 25: /* ftouiz[sd]. */
6580 case 26: /* ftosi[sd]. */
6581 case 27: /* ftosiz[sd]. */
6582 /* These instructions will not bounce due to underflow. */
6583 *numregs = 0;
6584 vpipe = VFP11_FMAC;
6585 break;
6586
6587 case 3: /* fsqrt[sd]. */
6588 /* fsqrt cannot underflow, but it can (perhaps) overwrite
6589 registers to cause the erratum in previous instructions. */
6590 bfd_arm_vfp11_write_mask (destmask, fd);
6591 vpipe = VFP11_DS;
6592 break;
6593
6594 case 15: /* fcvt{ds,sd}. */
6595 {
6596 int rnum = 0;
6597
6598 bfd_arm_vfp11_write_mask (destmask, fd);
6599
6600 /* Only FCVTSD can underflow. */
6601 if ((insn & 0x100) != 0)
6602 regs[rnum++] = fm;
6603
6604 *numregs = rnum;
6605
6606 vpipe = VFP11_FMAC;
6607 }
6608 break;
6609
6610 default:
6611 return VFP11_BAD;
6612 }
6613 }
6614 break;
6615
6616 default:
6617 return VFP11_BAD;
6618 }
6619 }
6620 /* Two-register transfer. */
6621 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
6622 {
6623 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
6624
6625 if ((insn & 0x100000) == 0)
6626 {
6627 if (is_double)
6628 bfd_arm_vfp11_write_mask (destmask, fm);
6629 else
6630 {
6631 bfd_arm_vfp11_write_mask (destmask, fm);
6632 bfd_arm_vfp11_write_mask (destmask, fm + 1);
6633 }
6634 }
6635
6636 vpipe = VFP11_LS;
6637 }
6638 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
6639 {
6640 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
6641 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
6642
6643 switch (puw)
6644 {
6645 case 0: /* Two-reg transfer. We should catch these above. */
6646 abort ();
6647
6648 case 2: /* fldm[sdx]. */
6649 case 3:
6650 case 5:
6651 {
6652 unsigned int i, offset = insn & 0xff;
6653
6654 if (is_double)
6655 offset >>= 1;
6656
6657 for (i = fd; i < fd + offset; i++)
6658 bfd_arm_vfp11_write_mask (destmask, i);
6659 }
6660 break;
6661
6662 case 4: /* fld[sd]. */
6663 case 6:
6664 bfd_arm_vfp11_write_mask (destmask, fd);
6665 break;
6666
6667 default:
6668 return VFP11_BAD;
6669 }
6670
6671 vpipe = VFP11_LS;
6672 }
6673 /* Single-register transfer. Note L==0. */
6674 else if ((insn & 0x0f100e10) == 0x0e000a10)
6675 {
6676 unsigned int opcode = (insn >> 21) & 7;
6677 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
6678
6679 switch (opcode)
6680 {
6681 case 0: /* fmsr/fmdlr. */
6682 case 1: /* fmdhr. */
6683 /* Mark fmdhr and fmdlr as writing to the whole of the DP
6684 destination register. I don't know if this is exactly right,
6685 but it is the conservative choice. */
6686 bfd_arm_vfp11_write_mask (destmask, fn);
6687 break;
6688
6689 case 7: /* fmxr. */
6690 break;
6691 }
6692
6693 vpipe = VFP11_LS;
6694 }
6695
6696 return vpipe;
6697 }
6698
6699
6700 static int elf32_arm_compare_mapping (const void * a, const void * b);
6701
6702
6703 /* Look for potentially-troublesome code sequences which might trigger the
6704 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
6705 (available from ARM) for details of the erratum. A short version is
6706 described in ld.texinfo. */
6707
6708 bfd_boolean
6709 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
6710 {
6711 asection *sec;
6712 bfd_byte *contents = NULL;
6713 int state = 0;
6714 int regs[3], numregs = 0;
 6715   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
 6716   int use_vector;
 6717
 6718   if (globals == NULL)
 6719     return FALSE;
 6720   use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
6721 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
6722 The states transition as follows:
6723
6724 0 -> 1 (vector) or 0 -> 2 (scalar)
6725 A VFP FMAC-pipeline instruction has been seen. Fill
6726 regs[0]..regs[numregs-1] with its input operands. Remember this
6727 instruction in 'first_fmac'.
6728
6729 1 -> 2
6730 Any instruction, except for a VFP instruction which overwrites
6731 regs[*].
6732
6733 1 -> 3 [ -> 0 ] or
6734 2 -> 3 [ -> 0 ]
6735 A VFP instruction has been seen which overwrites any of regs[*].
6736 We must make a veneer! Reset state to 0 before examining next
6737 instruction.
6738
6739 2 -> 0
6740 If we fail to match anything in state 2, reset to state 0 and reset
6741 the instruction pointer to the instruction after 'first_fmac'.
6742
6743 If the VFP11 vector mode is in use, there must be at least two unrelated
6744 instructions between anti-dependent VFP11 instructions to properly avoid
6745 triggering the erratum, hence the use of the extra state 1. */
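  /* As an illustration of the scalar case: FMACS s0, s1, s2 moves the FSM
     from state 0 to state 2 with regs[] = { s0, s1, s2 }; a subsequent
     FLDS s1, [r0] overwrites one of those inputs, so state 3 is reached
     and a veneer is recorded for the FMACS.  */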
6746
6747 /* If we are only performing a partial link do not bother
6748 to construct any glue. */
6749 if (link_info->relocatable)
6750 return TRUE;
6751
6752 /* Skip if this bfd does not correspond to an ELF image. */
6753 if (! is_arm_elf (abfd))
6754 return TRUE;
6755
6756 /* We should have chosen a fix type by the time we get here. */
6757 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
6758
6759 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
6760 return TRUE;
6761
6762 /* Skip this BFD if it corresponds to an executable or dynamic object. */
6763 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
6764 return TRUE;
6765
6766 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6767 {
6768 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
6769 struct _arm_elf_section_data *sec_data;
6770
6771 /* If we don't have executable progbits, we're not interested in this
6772 section. Also skip if section is to be excluded. */
6773 if (elf_section_type (sec) != SHT_PROGBITS
6774 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
6775 || (sec->flags & SEC_EXCLUDE) != 0
6776 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
6777 || sec->output_section == bfd_abs_section_ptr
6778 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
6779 continue;
6780
6781 sec_data = elf32_arm_section_data (sec);
6782
6783 if (sec_data->mapcount == 0)
6784 continue;
6785
6786 if (elf_section_data (sec)->this_hdr.contents != NULL)
6787 contents = elf_section_data (sec)->this_hdr.contents;
6788 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6789 goto error_return;
6790
6791 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6792 elf32_arm_compare_mapping);
6793
6794 for (span = 0; span < sec_data->mapcount; span++)
6795 {
6796 unsigned int span_start = sec_data->map[span].vma;
6797 unsigned int span_end = (span == sec_data->mapcount - 1)
6798 ? sec->size : sec_data->map[span + 1].vma;
6799 char span_type = sec_data->map[span].type;
6800
6801 /* FIXME: Only ARM mode is supported at present. We may need to
6802 support Thumb-2 mode also at some point. */
6803 if (span_type != 'a')
6804 continue;
6805
6806 for (i = span_start; i < span_end;)
6807 {
6808 unsigned int next_i = i + 4;
6809 unsigned int insn = bfd_big_endian (abfd)
6810 ? (contents[i] << 24)
6811 | (contents[i + 1] << 16)
6812 | (contents[i + 2] << 8)
6813 | contents[i + 3]
6814 : (contents[i + 3] << 24)
6815 | (contents[i + 2] << 16)
6816 | (contents[i + 1] << 8)
6817 | contents[i];
6818 unsigned int writemask = 0;
6819 enum bfd_arm_vfp11_pipe vpipe;
6820
6821 switch (state)
6822 {
6823 case 0:
6824 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6825 &numregs);
6826 /* I'm assuming the VFP11 erratum can trigger with denorm
6827 operands on either the FMAC or the DS pipeline. This might
6828 lead to slightly overenthusiastic veneer insertion. */
6829 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6830 {
6831 state = use_vector ? 1 : 2;
6832 first_fmac = i;
6833 veneer_of_insn = insn;
6834 }
6835 break;
6836
6837 case 1:
6838 {
6839 int other_regs[3], other_numregs;
6840 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6841 other_regs,
6842 &other_numregs);
6843 if (vpipe != VFP11_BAD
6844 && bfd_arm_vfp11_antidependency (writemask, regs,
6845 numregs))
6846 state = 3;
6847 else
6848 state = 2;
6849 }
6850 break;
6851
6852 case 2:
6853 {
6854 int other_regs[3], other_numregs;
6855 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6856 other_regs,
6857 &other_numregs);
6858 if (vpipe != VFP11_BAD
6859 && bfd_arm_vfp11_antidependency (writemask, regs,
6860 numregs))
6861 state = 3;
6862 else
6863 {
6864 state = 0;
6865 next_i = first_fmac + 4;
6866 }
6867 }
6868 break;
6869
6870 case 3:
6871 abort (); /* Should be unreachable. */
6872 }
6873
6874 if (state == 3)
6875 {
 6876 		  elf32_vfp11_erratum_list *newerr = (elf32_vfp11_erratum_list *)
6877 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6878
6879 elf32_arm_section_data (sec)->erratumcount += 1;
6880
6881 newerr->u.b.vfp_insn = veneer_of_insn;
6882
6883 switch (span_type)
6884 {
6885 case 'a':
6886 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6887 break;
6888
6889 default:
6890 abort ();
6891 }
6892
6893 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6894 first_fmac);
6895
6896 newerr->vma = -1;
6897
6898 newerr->next = sec_data->erratumlist;
6899 sec_data->erratumlist = newerr;
6900
6901 state = 0;
6902 }
6903
6904 i = next_i;
6905 }
6906 }
6907
6908 if (contents != NULL
6909 && elf_section_data (sec)->this_hdr.contents != contents)
6910 free (contents);
6911 contents = NULL;
6912 }
6913
6914 return TRUE;
6915
6916 error_return:
6917 if (contents != NULL
6918 && elf_section_data (sec)->this_hdr.contents != contents)
6919 free (contents);
6920
6921 return FALSE;
6922 }
6923
6924 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6925 after sections have been laid out, using specially-named symbols. */
6926
6927 void
6928 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6929 struct bfd_link_info *link_info)
6930 {
6931 asection *sec;
6932 struct elf32_arm_link_hash_table *globals;
6933 char *tmp_name;
6934
6935 if (link_info->relocatable)
6936 return;
6937
6938 /* Skip if this bfd does not correspond to an ELF image. */
6939 if (! is_arm_elf (abfd))
6940 return;
6941
6942 globals = elf32_arm_hash_table (link_info);
6943 if (globals == NULL)
6944 return;
6945
6946 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6947 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6948
6949 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6950 {
6951 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6952 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6953
6954 for (; errnode != NULL; errnode = errnode->next)
6955 {
6956 struct elf_link_hash_entry *myh;
6957 bfd_vma vma;
6958
6959 switch (errnode->type)
6960 {
6961 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6962 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6963 /* Find veneer symbol. */
6964 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6965 errnode->u.b.veneer->u.v.id);
6966
6967 myh = elf_link_hash_lookup
6968 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6969
6970 if (myh == NULL)
6971 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6972 "`%s'"), abfd, tmp_name);
6973
6974 vma = myh->root.u.def.section->output_section->vma
6975 + myh->root.u.def.section->output_offset
6976 + myh->root.u.def.value;
6977
6978 errnode->u.b.veneer->vma = vma;
6979 break;
6980
6981 case VFP11_ERRATUM_ARM_VENEER:
6982 case VFP11_ERRATUM_THUMB_VENEER:
6983 /* Find return location. */
6984 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6985 errnode->u.v.id);
6986
6987 myh = elf_link_hash_lookup
6988 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6989
6990 if (myh == NULL)
6991 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6992 "`%s'"), abfd, tmp_name);
6993
6994 vma = myh->root.u.def.section->output_section->vma
6995 + myh->root.u.def.section->output_offset
6996 + myh->root.u.def.value;
6997
6998 errnode->u.v.branch->vma = vma;
6999 break;
7000
7001 default:
7002 abort ();
7003 }
7004 }
7005 }
7006
7007 free (tmp_name);
7008 }
7009
7010
7011 /* Set target relocation values needed during linking. */
7012
7013 void
7014 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7015 struct bfd_link_info *link_info,
7016 int target1_is_rel,
7017 char * target2_type,
7018 int fix_v4bx,
7019 int use_blx,
7020 bfd_arm_vfp11_fix vfp11_fix,
7021 int no_enum_warn, int no_wchar_warn,
7022 int pic_veneer, int fix_cortex_a8,
7023 int fix_arm1176)
7024 {
7025 struct elf32_arm_link_hash_table *globals;
7026
7027 globals = elf32_arm_hash_table (link_info);
7028 if (globals == NULL)
7029 return;
7030
7031 globals->target1_is_rel = target1_is_rel;
7032 if (strcmp (target2_type, "rel") == 0)
7033 globals->target2_reloc = R_ARM_REL32;
7034 else if (strcmp (target2_type, "abs") == 0)
7035 globals->target2_reloc = R_ARM_ABS32;
7036 else if (strcmp (target2_type, "got-rel") == 0)
7037 globals->target2_reloc = R_ARM_GOT_PREL;
7038 else
7039 {
7040 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7041 target2_type);
7042 }
7043 globals->fix_v4bx = fix_v4bx;
7044 globals->use_blx |= use_blx;
7045 globals->vfp11_fix = vfp11_fix;
7046 globals->pic_veneer = pic_veneer;
7047 globals->fix_cortex_a8 = fix_cortex_a8;
7048 globals->fix_arm1176 = fix_arm1176;
7049
7050 BFD_ASSERT (is_arm_elf (output_bfd));
7051 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7052 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7053 }
7054
7055 /* Replace the target offset of a Thumb bl or b.w instruction. */
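/* The upper halfword holds the S bit and imm10 (offset bits [21:12]);
   the lower halfword holds J1, J2 and imm11 (offset bits [11:1]), with
   J1 = !I1 ^ S and J2 = !I2 ^ S where I1 and I2 are offset bits 23 and
   22, as in the Thumb-2 BL/B.W encoding.  */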
7056
7057 static void
7058 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
7059 {
7060 bfd_vma upper;
7061 bfd_vma lower;
7062 int reloc_sign;
7063
7064 BFD_ASSERT ((offset & 1) == 0);
7065
7066 upper = bfd_get_16 (abfd, insn);
7067 lower = bfd_get_16 (abfd, insn + 2);
7068 reloc_sign = (offset < 0) ? 1 : 0;
7069 upper = (upper & ~(bfd_vma) 0x7ff)
7070 | ((offset >> 12) & 0x3ff)
7071 | (reloc_sign << 10);
7072 lower = (lower & ~(bfd_vma) 0x2fff)
7073 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
7074 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
7075 | ((offset >> 1) & 0x7ff);
7076 bfd_put_16 (abfd, upper, insn);
7077 bfd_put_16 (abfd, lower, insn + 2);
7078 }
7079
7080 /* Thumb code calling an ARM function. */
7081
7082 static int
7083 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7084 const char * name,
7085 bfd * input_bfd,
7086 bfd * output_bfd,
7087 asection * input_section,
7088 bfd_byte * hit_data,
7089 asection * sym_sec,
7090 bfd_vma offset,
7091 bfd_signed_vma addend,
7092 bfd_vma val,
7093 char **error_message)
7094 {
7095 asection * s = 0;
7096 bfd_vma my_offset;
7097 long int ret_offset;
7098 struct elf_link_hash_entry * myh;
7099 struct elf32_arm_link_hash_table * globals;
7100
7101 myh = find_thumb_glue (info, name, error_message);
7102 if (myh == NULL)
7103 return FALSE;
7104
7105 globals = elf32_arm_hash_table (info);
7106 BFD_ASSERT (globals != NULL);
7107 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7108
7109 my_offset = myh->root.u.def.value;
7110
7111 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7112 THUMB2ARM_GLUE_SECTION_NAME);
7113
7114 BFD_ASSERT (s != NULL);
7115 BFD_ASSERT (s->contents != NULL);
7116 BFD_ASSERT (s->output_section != NULL);
7117
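  /* The low bit of the glue symbol's value, set when the glue was
     recorded, marks a stub whose body has not been written out yet;
     clear it and emit the stub instructions now.  */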
7118 if ((my_offset & 0x01) == 0x01)
7119 {
7120 if (sym_sec != NULL
7121 && sym_sec->owner != NULL
7122 && !INTERWORK_FLAG (sym_sec->owner))
7123 {
7124 (*_bfd_error_handler)
7125 (_("%B(%s): warning: interworking not enabled.\n"
7126 " first occurrence: %B: Thumb call to ARM"),
7127 sym_sec->owner, input_bfd, name);
7128
7129 return FALSE;
7130 }
7131
7132 --my_offset;
7133 myh->root.u.def.value = my_offset;
7134
7135 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
7136 s->contents + my_offset);
7137
7138 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
7139 s->contents + my_offset + 2);
7140
7141 ret_offset =
7142 /* Address of destination of the stub. */
7143 ((bfd_signed_vma) val)
7144 - ((bfd_signed_vma)
7145 /* Offset from the start of the current section
7146 to the start of the stubs. */
7147 (s->output_offset
7148 /* Offset of the start of this stub from the start of the stubs. */
7149 + my_offset
7150 /* Address of the start of the current section. */
7151 + s->output_section->vma)
7152 /* The branch instruction is 4 bytes into the stub. */
7153 + 4
7154 /* ARM branches work from the pc of the instruction + 8. */
7155 + 8);
7156
7157 put_arm_insn (globals, output_bfd,
7158 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
7159 s->contents + my_offset + 4);
7160 }
7161
7162 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
7163
7164 /* Now go back and fix up the original BL insn to point to here. */
7165 ret_offset =
7166 /* Address of where the stub is located. */
7167 (s->output_section->vma + s->output_offset + my_offset)
7168 /* Address of where the BL is located. */
7169 - (input_section->output_section->vma + input_section->output_offset
7170 + offset)
7171 /* Addend in the relocation. */
7172 - addend
7173 /* Biassing for PC-relative addressing. */
7174 - 8;
7175
7176 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
7177
7178 return TRUE;
7179 }
7180
7181 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
7182
7183 static struct elf_link_hash_entry *
7184 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
7185 const char * name,
7186 bfd * input_bfd,
7187 bfd * output_bfd,
7188 asection * sym_sec,
7189 bfd_vma val,
7190 asection * s,
7191 char ** error_message)
7192 {
7193 bfd_vma my_offset;
7194 long int ret_offset;
7195 struct elf_link_hash_entry * myh;
7196 struct elf32_arm_link_hash_table * globals;
7197
7198 myh = find_arm_glue (info, name, error_message);
7199 if (myh == NULL)
7200 return NULL;
7201
7202 globals = elf32_arm_hash_table (info);
7203 BFD_ASSERT (globals != NULL);
7204 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7205
7206 my_offset = myh->root.u.def.value;
7207
7208 if ((my_offset & 0x01) == 0x01)
7209 {
7210 if (sym_sec != NULL
7211 && sym_sec->owner != NULL
7212 && !INTERWORK_FLAG (sym_sec->owner))
7213 {
7214 (*_bfd_error_handler)
7215 (_("%B(%s): warning: interworking not enabled.\n"
7216 " first occurrence: %B: arm call to thumb"),
7217 sym_sec->owner, input_bfd, name);
7218 }
7219
7220 --my_offset;
7221 myh->root.u.def.value = my_offset;
7222
7223 if (info->shared || globals->root.is_relocatable_executable
7224 || globals->pic_veneer)
7225 {
7226 /* For relocatable objects we can't use absolute addresses,
7227 so construct the address from a relative offset. */
7228 /* TODO: If the offset is small it's probably worth
7229 constructing the address with adds. */
7230 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
7231 s->contents + my_offset);
7232 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
7233 s->contents + my_offset + 4);
7234 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
7235 s->contents + my_offset + 8);
7236 /* Adjust the offset by 4 for the position of the add,
7237 and 8 for the pipeline offset. */
7238 ret_offset = (val - (s->output_offset
7239 + s->output_section->vma
7240 + my_offset + 12))
7241 | 1;
7242 bfd_put_32 (output_bfd, ret_offset,
7243 s->contents + my_offset + 12);
7244 }
7245 else if (globals->use_blx)
7246 {
7247 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
7248 s->contents + my_offset);
7249
7250 /* It's a thumb address. Add the low order bit. */
7251 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
7252 s->contents + my_offset + 4);
7253 }
7254 else
7255 {
7256 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
7257 s->contents + my_offset);
7258
7259 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
7260 s->contents + my_offset + 4);
7261
7262 /* It's a thumb address. Add the low order bit. */
7263 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
7264 s->contents + my_offset + 8);
7265
7266 my_offset += 12;
7267 }
7268 }
7269
7270 BFD_ASSERT (my_offset <= globals->arm_glue_size);
7271
7272 return myh;
7273 }
7274
7275 /* Arm code calling a Thumb function. */
7276
7277 static int
7278 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
7279 const char * name,
7280 bfd * input_bfd,
7281 bfd * output_bfd,
7282 asection * input_section,
7283 bfd_byte * hit_data,
7284 asection * sym_sec,
7285 bfd_vma offset,
7286 bfd_signed_vma addend,
7287 bfd_vma val,
7288 char **error_message)
7289 {
7290 unsigned long int tmp;
7291 bfd_vma my_offset;
7292 asection * s;
7293 long int ret_offset;
7294 struct elf_link_hash_entry * myh;
7295 struct elf32_arm_link_hash_table * globals;
7296
7297 globals = elf32_arm_hash_table (info);
7298 BFD_ASSERT (globals != NULL);
7299 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7300
7301 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7302 ARM2THUMB_GLUE_SECTION_NAME);
7303 BFD_ASSERT (s != NULL);
7304 BFD_ASSERT (s->contents != NULL);
7305 BFD_ASSERT (s->output_section != NULL);
7306
7307 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
7308 sym_sec, val, s, error_message);
7309 if (!myh)
7310 return FALSE;
7311
7312 my_offset = myh->root.u.def.value;
7313 tmp = bfd_get_32 (input_bfd, hit_data);
7314 tmp = tmp & 0xFF000000;
7315
7316 /* Somehow these are both 4 too far, so subtract 8. */
7317 ret_offset = (s->output_offset
7318 + my_offset
7319 + s->output_section->vma
7320 - (input_section->output_offset
7321 + input_section->output_section->vma
7322 + offset + addend)
7323 - 8);
7324
7325 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
7326
7327 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
7328
7329 return TRUE;
7330 }
7331
7332 /* Populate Arm stub for an exported Thumb function. */
7333
7334 static bfd_boolean
7335 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
7336 {
7337 struct bfd_link_info * info = (struct bfd_link_info *) inf;
7338 asection * s;
7339 struct elf_link_hash_entry * myh;
7340 struct elf32_arm_link_hash_entry *eh;
7341 struct elf32_arm_link_hash_table * globals;
7342 asection *sec;
7343 bfd_vma val;
7344 char *error_message;
7345
7346 eh = elf32_arm_hash_entry (h);
7347 /* Allocate stubs for exported Thumb functions on v4t. */
7348 if (eh->export_glue == NULL)
7349 return TRUE;
7350
7351 globals = elf32_arm_hash_table (info);
7352 BFD_ASSERT (globals != NULL);
7353 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7354
7355 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7356 ARM2THUMB_GLUE_SECTION_NAME);
7357 BFD_ASSERT (s != NULL);
7358 BFD_ASSERT (s->contents != NULL);
7359 BFD_ASSERT (s->output_section != NULL);
7360
7361 sec = eh->export_glue->root.u.def.section;
7362
7363 BFD_ASSERT (sec->output_section != NULL);
7364
7365 val = eh->export_glue->root.u.def.value + sec->output_offset
7366 + sec->output_section->vma;
7367
7368 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
7369 h->root.u.def.section->owner,
7370 globals->obfd, sec, val, s,
7371 &error_message);
7372 BFD_ASSERT (myh);
7373 return TRUE;
7374 }
7375
7376 /* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
7377
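/* Bit 1 of bx_glue_offset[reg] records that a veneer has been allocated for
   register REG (asserted below) and bit 0 that its contents have already
   been written; the remaining bits hold the veneer's offset within the glue
   section.  */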
7378 static bfd_vma
7379 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
7380 {
7381 bfd_byte *p;
7382 bfd_vma glue_addr;
7383 asection *s;
7384 struct elf32_arm_link_hash_table *globals;
7385
7386 globals = elf32_arm_hash_table (info);
7387 BFD_ASSERT (globals != NULL);
7388 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7389
7390 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
7391 ARM_BX_GLUE_SECTION_NAME);
7392 BFD_ASSERT (s != NULL);
7393 BFD_ASSERT (s->contents != NULL);
7394 BFD_ASSERT (s->output_section != NULL);
7395
7396 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
7397
7398 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
7399
7400 if ((globals->bx_glue_offset[reg] & 1) == 0)
7401 {
7402 p = s->contents + glue_addr;
7403 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
7404 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
7405 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
7406 globals->bx_glue_offset[reg] |= 1;
7407 }
7408
7409 return glue_addr + s->output_section->vma + s->output_offset;
7410 }
7411
7412 /* Generate Arm stubs for exported Thumb symbols. */
7413 static void
7414 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
7415 struct bfd_link_info *link_info)
7416 {
7417 struct elf32_arm_link_hash_table * globals;
7418
7419 if (link_info == NULL)
7420 /* Ignore this if we are not called by the ELF backend linker. */
7421 return;
7422
7423 globals = elf32_arm_hash_table (link_info);
7424 if (globals == NULL)
7425 return;
7426
7427 /* If blx is available then exported Thumb symbols are OK and there is
7428 nothing to do. */
7429 if (globals->use_blx)
7430 return;
7431
7432 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
7433 link_info);
7434 }
7435
7436 /* Reserve space for COUNT dynamic relocations in relocation section
7437    SRELOC.  */
7438
7439 static void
7440 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
7441 bfd_size_type count)
7442 {
7443 struct elf32_arm_link_hash_table *htab;
7444
7445 htab = elf32_arm_hash_table (info);
7446 BFD_ASSERT (htab->root.dynamic_sections_created);
7447 if (sreloc == NULL)
7448 abort ();
7449 sreloc->size += RELOC_SIZE (htab) * count;
7450 }
7451
7452 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
7453 dynamic, the relocations should go in SRELOC, otherwise they should
7454 go in the special .rel.iplt section. */
7455
7456 static void
7457 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
7458 bfd_size_type count)
7459 {
7460 struct elf32_arm_link_hash_table *htab;
7461
7462 htab = elf32_arm_hash_table (info);
7463 if (!htab->root.dynamic_sections_created)
7464 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
7465 else
7466 {
7467 BFD_ASSERT (sreloc != NULL);
7468 sreloc->size += RELOC_SIZE (htab) * count;
7469 }
7470 }
7471
7472 /* Add relocation REL to the end of relocation section SRELOC. */
7473
7474 static void
7475 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
7476 asection *sreloc, Elf_Internal_Rela *rel)
7477 {
7478 bfd_byte *loc;
7479 struct elf32_arm_link_hash_table *htab;
7480
7481 htab = elf32_arm_hash_table (info);
7482 if (!htab->root.dynamic_sections_created
7483 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
7484 sreloc = htab->root.irelplt;
7485 if (sreloc == NULL)
7486 abort ();
7487 loc = sreloc->contents;
7488 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
7489 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
7490 abort ();
7491 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
7492 }
7493
7494 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
7495 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
7496 to .plt. */
7497
7498 static void
7499 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
7500 bfd_boolean is_iplt_entry,
7501 union gotplt_union *root_plt,
7502 struct arm_plt_info *arm_plt)
7503 {
7504 struct elf32_arm_link_hash_table *htab;
7505 asection *splt;
7506 asection *sgotplt;
7507
7508 htab = elf32_arm_hash_table (info);
7509
7510 if (is_iplt_entry)
7511 {
7512 splt = htab->root.iplt;
7513 sgotplt = htab->root.igotplt;
7514
7515 /* NaCl uses a special first entry in .iplt too. */
7516 if (htab->nacl_p && splt->size == 0)
7517 splt->size += htab->plt_header_size;
7518
7519 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
7520 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
7521 }
7522 else
7523 {
7524 splt = htab->root.splt;
7525 sgotplt = htab->root.sgotplt;
7526
7527       /* Allocate room for an R_ARM_JUMP_SLOT relocation in .rel.plt.  */
7528 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
7529
7530 /* If this is the first .plt entry, make room for the special
7531 first entry. */
7532 if (splt->size == 0)
7533 splt->size += htab->plt_header_size;
7534
7535 htab->next_tls_desc_index++;
7536 }
7537
7538 /* Allocate the PLT entry itself, including any leading Thumb stub. */
7539 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7540 splt->size += PLT_THUMB_STUB_SIZE;
7541 root_plt->offset = splt->size;
7542 splt->size += htab->plt_entry_size;
7543
7544 if (!htab->symbian_p)
7545 {
7546 /* We also need to make an entry in the .got.plt section, which
7547 will be placed in the .got section by the linker script. */
7548 if (is_iplt_entry)
7549 arm_plt->got_offset = sgotplt->size;
7550 else
7551 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7552 sgotplt->size += 4;
7553 }
7554 }
7555
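/* Encode a halfword immediate into the split imm4:imm12 fields of an ARM
   MOVW/MOVT instruction (imm12 in bits 11:0, imm4 in bits 19:16).
   Illustrative example: for the value 0x12345678, arm_movw_immediate
   returns 0x00050678 (imm16 = 0x5678) and arm_movt_immediate returns
   0x00010234 (imm16 = 0x1234).  */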
7556 static bfd_vma
7557 arm_movw_immediate (bfd_vma value)
7558 {
7559 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7560 }
7561
7562 static bfd_vma
7563 arm_movt_immediate (bfd_vma value)
7564 {
7565 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
7566 }
7567
7568 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
7569 the entry lives in .iplt and resolves to (*SYM_VALUE)().
7570 Otherwise, DYNINDX is the index of the symbol in the dynamic
7571 symbol table and SYM_VALUE is undefined.
7572
7573 ROOT_PLT points to the offset of the PLT entry from the start of its
7574 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
7575 bookkeeping information.
7576
7577 Returns FALSE if there was a problem. */
7578
7579 static bfd_boolean
7580 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
7581 union gotplt_union *root_plt,
7582 struct arm_plt_info *arm_plt,
7583 int dynindx, bfd_vma sym_value)
7584 {
7585 struct elf32_arm_link_hash_table *htab;
7586 asection *sgot;
7587 asection *splt;
7588 asection *srel;
7589 bfd_byte *loc;
7590 bfd_vma plt_index;
7591 Elf_Internal_Rela rel;
7592 bfd_vma plt_header_size;
7593 bfd_vma got_header_size;
7594
7595 htab = elf32_arm_hash_table (info);
7596
7597 /* Pick the appropriate sections and sizes. */
7598 if (dynindx == -1)
7599 {
7600 splt = htab->root.iplt;
7601 sgot = htab->root.igotplt;
7602 srel = htab->root.irelplt;
7603
7604 /* There are no reserved entries in .igot.plt, and no special
7605 first entry in .iplt. */
7606 got_header_size = 0;
7607 plt_header_size = 0;
7608 }
7609 else
7610 {
7611 splt = htab->root.splt;
7612 sgot = htab->root.sgotplt;
7613 srel = htab->root.srelplt;
7614
7615 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
7616 plt_header_size = htab->plt_header_size;
7617 }
7618 BFD_ASSERT (splt != NULL && srel != NULL);
7619
7620 /* Fill in the entry in the procedure linkage table. */
7621 if (htab->symbian_p)
7622 {
7623 BFD_ASSERT (dynindx >= 0);
7624 put_arm_insn (htab, output_bfd,
7625 elf32_arm_symbian_plt_entry[0],
7626 splt->contents + root_plt->offset);
7627 bfd_put_32 (output_bfd,
7628 elf32_arm_symbian_plt_entry[1],
7629 splt->contents + root_plt->offset + 4);
7630
7631 /* Fill in the entry in the .rel.plt section. */
7632 rel.r_offset = (splt->output_section->vma
7633 + splt->output_offset
7634 + root_plt->offset + 4);
7635 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
7636
7637 /* Get the index in the procedure linkage table which
7638 corresponds to this symbol. This is the index of this symbol
7639 in all the symbols for which we are making plt entries. The
7640 first entry in the procedure linkage table is reserved. */
7641 plt_index = ((root_plt->offset - plt_header_size)
7642 / htab->plt_entry_size);
7643 }
7644 else
7645 {
7646 bfd_vma got_offset, got_address, plt_address;
7647 bfd_vma got_displacement, initial_got_entry;
7648 bfd_byte * ptr;
7649
7650 BFD_ASSERT (sgot != NULL);
7651
7652 /* Get the offset into the .(i)got.plt table of the entry that
7653 corresponds to this function. */
7654 got_offset = (arm_plt->got_offset & -2);
7655
7656 /* Get the index in the procedure linkage table which
7657 corresponds to this symbol. This is the index of this symbol
7658 in all the symbols for which we are making plt entries.
7659 After the reserved .got.plt entries, all symbols appear in
7660 the same order as in .plt. */
7661 plt_index = (got_offset - got_header_size) / 4;
7662
7663 /* Calculate the address of the GOT entry. */
7664 got_address = (sgot->output_section->vma
7665 + sgot->output_offset
7666 + got_offset);
7667
7668 /* ...and the address of the PLT entry. */
7669 plt_address = (splt->output_section->vma
7670 + splt->output_offset
7671 + root_plt->offset);
7672
7673 ptr = splt->contents + root_plt->offset;
7674 if (htab->vxworks_p && info->shared)
7675 {
7676 unsigned int i;
7677 bfd_vma val;
7678
7679 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7680 {
7681 val = elf32_arm_vxworks_shared_plt_entry[i];
7682 if (i == 2)
7683 val |= got_address - sgot->output_section->vma;
7684 if (i == 5)
7685 val |= plt_index * RELOC_SIZE (htab);
7686 if (i == 2 || i == 5)
7687 bfd_put_32 (output_bfd, val, ptr);
7688 else
7689 put_arm_insn (htab, output_bfd, val, ptr);
7690 }
7691 }
7692 else if (htab->vxworks_p)
7693 {
7694 unsigned int i;
7695 bfd_vma val;
7696
7697 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
7698 {
7699 val = elf32_arm_vxworks_exec_plt_entry[i];
7700 if (i == 2)
7701 val |= got_address;
7702 if (i == 4)
7703 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
7704 if (i == 5)
7705 val |= plt_index * RELOC_SIZE (htab);
7706 if (i == 2 || i == 5)
7707 bfd_put_32 (output_bfd, val, ptr);
7708 else
7709 put_arm_insn (htab, output_bfd, val, ptr);
7710 }
7711
7712 loc = (htab->srelplt2->contents
7713 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
7714
7715 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
7716 referencing the GOT for this PLT entry. */
7717 rel.r_offset = plt_address + 8;
7718 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
7719 rel.r_addend = got_offset;
7720 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7721 loc += RELOC_SIZE (htab);
7722
7723 /* Create the R_ARM_ABS32 relocation referencing the
7724 beginning of the PLT for this GOT entry. */
7725 rel.r_offset = got_address;
7726 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
7727 rel.r_addend = 0;
7728 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7729 }
7730 else if (htab->nacl_p)
7731 {
7732 /* Calculate the displacement between the PLT slot and the
7733 common tail that's part of the special initial PLT slot. */
7734 int32_t tail_displacement
7735 = ((splt->output_section->vma + splt->output_offset
7736 + ARM_NACL_PLT_TAIL_OFFSET)
7737 - (plt_address + htab->plt_entry_size + 4));
7738 BFD_ASSERT ((tail_displacement & 3) == 0);
7739 tail_displacement >>= 2;
7740
7741 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
7742 || (-tail_displacement & 0xff000000) == 0);
7743
7744 /* Calculate the displacement between the PLT slot and the entry
7745 in the GOT. The offset accounts for the value produced by
7746 adding to pc in the penultimate instruction of the PLT stub. */
7747 got_displacement = (got_address
7748 - (plt_address + htab->plt_entry_size));
7749
7750 /* NaCl does not support interworking at all. */
7751 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
7752
7753 put_arm_insn (htab, output_bfd,
7754 elf32_arm_nacl_plt_entry[0]
7755 | arm_movw_immediate (got_displacement),
7756 ptr + 0);
7757 put_arm_insn (htab, output_bfd,
7758 elf32_arm_nacl_plt_entry[1]
7759 | arm_movt_immediate (got_displacement),
7760 ptr + 4);
7761 put_arm_insn (htab, output_bfd,
7762 elf32_arm_nacl_plt_entry[2],
7763 ptr + 8);
7764 put_arm_insn (htab, output_bfd,
7765 elf32_arm_nacl_plt_entry[3]
7766 | (tail_displacement & 0x00ffffff),
7767 ptr + 12);
7768 }
7769 else if (using_thumb_only (htab))
7770 {
7771 /* PR ld/16017: Generate thumb only PLT entries. */
7772 if (!using_thumb2 (htab))
7773 {
7774 /* FIXME: We ought to be able to generate thumb-1 PLT
7775 instructions... */
7776 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
7777 output_bfd);
7778 return FALSE;
7779 }
7780
7781 /* Calculate the displacement between the PLT slot and the entry in
7782 the GOT. The 12-byte offset accounts for the value produced by
7783 adding to pc in the 3rd instruction of the PLT stub. */
7784 got_displacement = got_address - (plt_address + 12);
7785
7786 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
7787 instead of 'put_thumb_insn'. */
7788 put_arm_insn (htab, output_bfd,
7789 elf32_thumb2_plt_entry[0]
7790 | ((got_displacement & 0x000000ff) << 16)
7791 | ((got_displacement & 0x00000700) << 20)
7792 | ((got_displacement & 0x00000800) >> 1)
7793 | ((got_displacement & 0x0000f000) >> 12),
7794 ptr + 0);
7795 put_arm_insn (htab, output_bfd,
7796 elf32_thumb2_plt_entry[1]
7797 | ((got_displacement & 0x00ff0000) )
7798 | ((got_displacement & 0x07000000) << 4)
7799 | ((got_displacement & 0x08000000) >> 17)
7800 | ((got_displacement & 0xf0000000) >> 28),
7801 ptr + 4);
7802 put_arm_insn (htab, output_bfd,
7803 elf32_thumb2_plt_entry[2],
7804 ptr + 8);
7805 put_arm_insn (htab, output_bfd,
7806 elf32_thumb2_plt_entry[3],
7807 ptr + 12);
7808 }
7809 else
7810 {
7811 /* Calculate the displacement between the PLT slot and the
7812 entry in the GOT. The eight-byte offset accounts for the
7813 value produced by adding to pc in the first instruction
7814 of the PLT stub. */
7815 got_displacement = got_address - (plt_address + 8);
7816
7817 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
7818 {
7819 put_thumb_insn (htab, output_bfd,
7820 elf32_arm_plt_thumb_stub[0], ptr - 4);
7821 put_thumb_insn (htab, output_bfd,
7822 elf32_arm_plt_thumb_stub[1], ptr - 2);
7823 }
7824
7825 if (!elf32_arm_use_long_plt_entry)
7826 {
7827 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
7828
7829 put_arm_insn (htab, output_bfd,
7830 elf32_arm_plt_entry_short[0]
7831 | ((got_displacement & 0x0ff00000) >> 20),
7832 ptr + 0);
7833 put_arm_insn (htab, output_bfd,
7834 elf32_arm_plt_entry_short[1]
7835 | ((got_displacement & 0x000ff000) >> 12),
7836 			    ptr + 4);
7837 put_arm_insn (htab, output_bfd,
7838 elf32_arm_plt_entry_short[2]
7839 | (got_displacement & 0x00000fff),
7840 ptr + 8);
7841 #ifdef FOUR_WORD_PLT
7842 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
7843 #endif
7844 }
7845 else
7846 {
7847 put_arm_insn (htab, output_bfd,
7848 elf32_arm_plt_entry_long[0]
7849 | ((got_displacement & 0xf0000000) >> 28),
7850 ptr + 0);
7851 put_arm_insn (htab, output_bfd,
7852 elf32_arm_plt_entry_long[1]
7853 | ((got_displacement & 0x0ff00000) >> 20),
7854 ptr + 4);
7855 put_arm_insn (htab, output_bfd,
7856 elf32_arm_plt_entry_long[2]
7857 | ((got_displacement & 0x000ff000) >> 12),
7858 			    ptr + 8);
7859 put_arm_insn (htab, output_bfd,
7860 elf32_arm_plt_entry_long[3]
7861 | (got_displacement & 0x00000fff),
7862 ptr + 12);
7863 }
7864 }
7865
7866 /* Fill in the entry in the .rel(a).(i)plt section. */
7867 rel.r_offset = got_address;
7868 rel.r_addend = 0;
7869 if (dynindx == -1)
7870 {
7871 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
7872 The dynamic linker or static executable then calls SYM_VALUE
7873 to determine the correct run-time value of the .igot.plt entry. */
7874 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
7875 initial_got_entry = sym_value;
7876 }
7877 else
7878 {
7879 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
7880 initial_got_entry = (splt->output_section->vma
7881 + splt->output_offset);
7882 }
7883
7884 /* Fill in the entry in the global offset table. */
7885 bfd_put_32 (output_bfd, initial_got_entry,
7886 sgot->contents + got_offset);
7887 }
7888
7889 if (dynindx == -1)
7890 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
7891 else
7892 {
7893 loc = srel->contents + plt_index * RELOC_SIZE (htab);
7894 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
7895 }
7896
7897 return TRUE;
7898 }
7899
7900 /* Some relocations map to different relocations depending on the
7901 target. Return the real relocation. */
7902
7903 static int
7904 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
7905 int r_type)
7906 {
7907 switch (r_type)
7908 {
7909 case R_ARM_TARGET1:
7910 if (globals->target1_is_rel)
7911 return R_ARM_REL32;
7912 else
7913 return R_ARM_ABS32;
7914
7915 case R_ARM_TARGET2:
7916 return globals->target2_reloc;
7917
7918 default:
7919 return r_type;
7920 }
7921 }
7922
7923 /* Return the base VMA address which should be subtracted from real addresses
7924 when resolving @dtpoff relocation.
7925 This is PT_TLS segment p_vaddr. */
7926
7927 static bfd_vma
7928 dtpoff_base (struct bfd_link_info *info)
7929 {
7930 /* If tls_sec is NULL, we should have signalled an error already. */
7931 if (elf_hash_table (info)->tls_sec == NULL)
7932 return 0;
7933 return elf_hash_table (info)->tls_sec->vma;
7934 }
7935
7936 /* Return the relocation value for @tpoff relocation
7937 if STT_TLS virtual address is ADDRESS. */
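/* Illustrative example: ARM uses TLS variant 1, so the thread pointer
   addresses an 8-byte TCB followed by the aligned TLS block.  With a
   4-byte-aligned TLS segment the base below is 8, so the first byte of
   the segment (ADDRESS == tls_sec->vma) gets a @tpoff value of 8.  */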
7938
7939 static bfd_vma
7940 tpoff (struct bfd_link_info *info, bfd_vma address)
7941 {
7942 struct elf_link_hash_table *htab = elf_hash_table (info);
7943 bfd_vma base;
7944
7945 /* If tls_sec is NULL, we should have signalled an error already. */
7946 if (htab->tls_sec == NULL)
7947 return 0;
7948 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
7949 return address - htab->tls_sec->vma + base;
7950 }
7951
7952 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
7953 VALUE is the relocation value. */
7954
7955 static bfd_reloc_status_type
7956 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
7957 {
7958 if (value > 0xfff)
7959 return bfd_reloc_overflow;
7960
7961 value |= bfd_get_32 (abfd, data) & 0xfffff000;
7962 bfd_put_32 (abfd, value, data);
7963 return bfd_reloc_ok;
7964 }
7965
7966 /* Handle TLS relaxations. Relaxing is possible for symbols that use
7967    R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
7968 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
7969
7970 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
7971 is to then call final_link_relocate. Return other values in the
7972 case of error.
7973
7974    FIXME: When --emit-relocs is in effect, we'll emit relocs describing
7975 the pre-relaxed code. It would be nice if the relocs were updated
7976 to match the optimization. */
7977
7978 static bfd_reloc_status_type
7979 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
7980 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
7981 Elf_Internal_Rela *rel, unsigned long is_local)
7982 {
7983 unsigned long insn;
7984
7985 switch (ELF32_R_TYPE (rel->r_info))
7986 {
7987 default:
7988 return bfd_reloc_notsupported;
7989
7990 case R_ARM_TLS_GOTDESC:
7991 if (is_local)
7992 insn = 0;
7993 else
7994 {
7995 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
7996 if (insn & 1)
7997 insn -= 5; /* THUMB */
7998 else
7999 insn -= 8; /* ARM */
8000 }
8001 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
8002 return bfd_reloc_continue;
8003
8004 case R_ARM_THM_TLS_DESCSEQ:
8005 /* Thumb insn. */
8006 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
8007 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
8008 {
8009 if (is_local)
8010 /* nop */
8011 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8012 }
8013 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
8014 {
8015 if (is_local)
8016 /* nop */
8017 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8018 else
8019 /* ldr rx,[ry] */
8020 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
8021 }
8022 else if ((insn & 0xff87) == 0x4780) /* blx rx */
8023 {
8024 if (is_local)
8025 /* nop */
8026 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8027 else
8028 /* mov r0, rx */
8029 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
8030 contents + rel->r_offset);
8031 }
8032 else
8033 {
8034 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
8035 /* It's a 32 bit instruction, fetch the rest of it for
8036 error generation. */
8037 insn = (insn << 16)
8038 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
8039 (*_bfd_error_handler)
8040 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8041 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8042 return bfd_reloc_notsupported;
8043 }
8044 break;
8045
8046 case R_ARM_TLS_DESCSEQ:
8047 /* arm insn. */
8048 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8049 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8050 {
8051 if (is_local)
8052 /* mov rx, ry */
8053 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
8054 contents + rel->r_offset);
8055 }
8056 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8057 {
8058 if (is_local)
8059 /* nop */
8060 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8061 else
8062 /* ldr rx,[ry] */
8063 bfd_put_32 (input_bfd, insn & 0xfffff000,
8064 contents + rel->r_offset);
8065 }
8066 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
8067 {
8068 if (is_local)
8069 /* nop */
8070 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8071 else
8072 /* mov r0, rx */
8073 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
8074 contents + rel->r_offset);
8075 }
8076 else
8077 {
8078 (*_bfd_error_handler)
8079 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
8080 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8081 return bfd_reloc_notsupported;
8082 }
8083 break;
8084
8085 case R_ARM_TLS_CALL:
8086 /* GD->IE relaxation, turn the instruction into 'nop' or
8087 'ldr r0, [pc,r0]' */
8088 insn = is_local ? 0xe1a00000 : 0xe79f0000;
8089 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
8090 break;
8091
8092 case R_ARM_THM_TLS_CALL:
8093 /* GD->IE relaxation. */
8094 if (!is_local)
8095 /* add r0,pc; ldr r0, [r0] */
8096 insn = 0x44786800;
8097 else if (arch_has_thumb2_nop (globals))
8098 /* nop.w */
8099 insn = 0xf3af8000;
8100 else
8101 /* nop; nop */
8102 insn = 0xbf00bf00;
8103
8104 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
8105 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
8106 break;
8107 }
8108 return bfd_reloc_ok;
8109 }
8110
8111 /* For a given value of n, calculate the value of G_n as required to
8112 deal with group relocations. We return it in the form of an
8113 encoded constant-and-rotation, together with the final residual. If n is
8114 specified as less than zero, then final_residual is filled with the
8115 input value and no further action is performed. */
8116
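/* Worked example (illustrative): for VALUE = 0x1234 and n = 1, the first
   pass finds the most significant bit pair at bit 12, giving a shift of 6,
   so G_0 = 0x1200 (encoded as 0xd48: constant 0x48 with rotation field 13,
   i.e. 0x48 rotated right by 26) and residual 0x34; the second pass then
   yields G_1 = 0x34 (encoded unchanged) with a final residual of 0.  */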
8117 static bfd_vma
8118 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
8119 {
8120 int current_n;
8121 bfd_vma g_n;
8122 bfd_vma encoded_g_n = 0;
8123 bfd_vma residual = value; /* Also known as Y_n. */
8124
8125 for (current_n = 0; current_n <= n; current_n++)
8126 {
8127 int shift;
8128
8129 /* Calculate which part of the value to mask. */
8130 if (residual == 0)
8131 shift = 0;
8132 else
8133 {
8134 int msb;
8135
8136 /* Determine the most significant bit in the residual and
8137 align the resulting value to a 2-bit boundary. */
8138 for (msb = 30; msb >= 0; msb -= 2)
8139 if (residual & (3 << msb))
8140 break;
8141
8142 /* The desired shift is now (msb - 6), or zero, whichever
8143 is the greater. */
8144 shift = msb - 6;
8145 if (shift < 0)
8146 shift = 0;
8147 }
8148
8149 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
8150 g_n = residual & (0xff << shift);
8151 encoded_g_n = (g_n >> shift)
8152 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
8153
8154 /* Calculate the residual for the next time around. */
8155 residual &= ~g_n;
8156 }
8157
8158 *final_residual = residual;
8159
8160 return encoded_g_n;
8161 }
8162
8163 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
8164 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
8165
8166 static int
8167 identify_add_or_sub (bfd_vma insn)
8168 {
8169 int opcode = insn & 0x1e00000;
8170
8171 if (opcode == 1 << 23) /* ADD */
8172 return 1;
8173
8174 if (opcode == 1 << 22) /* SUB */
8175 return -1;
8176
8177 return 0;
8178 }
8179
8180 /* Perform a relocation as part of a final link. */
8181
8182 static bfd_reloc_status_type
8183 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8184 bfd * input_bfd,
8185 bfd * output_bfd,
8186 asection * input_section,
8187 bfd_byte * contents,
8188 Elf_Internal_Rela * rel,
8189 bfd_vma value,
8190 struct bfd_link_info * info,
8191 asection * sym_sec,
8192 const char * sym_name,
8193 unsigned char st_type,
8194 enum arm_st_branch_type branch_type,
8195 struct elf_link_hash_entry * h,
8196 bfd_boolean * unresolved_reloc_p,
8197 char ** error_message)
8198 {
8199 unsigned long r_type = howto->type;
8200 unsigned long r_symndx;
8201 bfd_byte * hit_data = contents + rel->r_offset;
8202 bfd_vma * local_got_offsets;
8203 bfd_vma * local_tlsdesc_gotents;
8204 asection * sgot;
8205 asection * splt;
8206 asection * sreloc = NULL;
8207 asection * srelgot;
8208 bfd_vma addend;
8209 bfd_signed_vma signed_addend;
8210 unsigned char dynreloc_st_type;
8211 bfd_vma dynreloc_value;
8212 struct elf32_arm_link_hash_table * globals;
8213 struct elf32_arm_link_hash_entry *eh;
8214 union gotplt_union *root_plt;
8215 struct arm_plt_info *arm_plt;
8216 bfd_vma plt_offset;
8217 bfd_vma gotplt_offset;
8218 bfd_boolean has_iplt_entry;
8219
8220 globals = elf32_arm_hash_table (info);
8221 if (globals == NULL)
8222 return bfd_reloc_notsupported;
8223
8224 BFD_ASSERT (is_arm_elf (input_bfd));
8225
8226 /* Some relocation types map to different relocations depending on the
8227 target. We pick the right one here. */
8228 r_type = arm_real_reloc_type (globals, r_type);
8229
8230 /* It is possible to have linker relaxations on some TLS access
8231 models. Update our information here. */
8232 r_type = elf32_arm_tls_transition (info, r_type, h);
8233
8234 if (r_type != howto->type)
8235 howto = elf32_arm_howto_from_type (r_type);
8236
8237 eh = (struct elf32_arm_link_hash_entry *) h;
8238 sgot = globals->root.sgot;
8239 local_got_offsets = elf_local_got_offsets (input_bfd);
8240 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8241
8242 if (globals->root.dynamic_sections_created)
8243 srelgot = globals->root.srelgot;
8244 else
8245 srelgot = NULL;
8246
8247 r_symndx = ELF32_R_SYM (rel->r_info);
8248
8249 if (globals->use_rel)
8250 {
8251 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8252
8253 if (addend & ((howto->src_mask + 1) >> 1))
8254 {
8255 signed_addend = -1;
8256 signed_addend &= ~ howto->src_mask;
8257 signed_addend |= addend;
8258 }
8259 else
8260 signed_addend = addend;
8261 }
8262 else
8263 addend = signed_addend = rel->r_addend;
8264
8265   /* ST_BRANCH_TO_ARM is meaningless for Thumb-only targets when we
8266 are resolving a function call relocation. */
8267 if (using_thumb_only (globals)
8268 && (r_type == R_ARM_THM_CALL
8269 || r_type == R_ARM_THM_JUMP24)
8270 && branch_type == ST_BRANCH_TO_ARM)
8271 branch_type = ST_BRANCH_TO_THUMB;
8272
8273 /* Record the symbol information that should be used in dynamic
8274 relocations. */
8275 dynreloc_st_type = st_type;
8276 dynreloc_value = value;
8277 if (branch_type == ST_BRANCH_TO_THUMB)
8278 dynreloc_value |= 1;
8279
8280 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8281 VALUE appropriately for relocations that we resolve at link time. */
8282 has_iplt_entry = FALSE;
8283 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8284 && root_plt->offset != (bfd_vma) -1)
8285 {
8286 plt_offset = root_plt->offset;
8287 gotplt_offset = arm_plt->got_offset;
8288
8289 if (h == NULL || eh->is_iplt)
8290 {
8291 has_iplt_entry = TRUE;
8292 splt = globals->root.iplt;
8293
8294 /* Populate .iplt entries here, because not all of them will
8295 be seen by finish_dynamic_symbol. The lower bit is set if
8296 we have already populated the entry. */
8297 if (plt_offset & 1)
8298 plt_offset--;
8299 else
8300 {
8301 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8302 -1, dynreloc_value))
8303 root_plt->offset |= 1;
8304 else
8305 return bfd_reloc_notsupported;
8306 }
8307
8308 /* Static relocations always resolve to the .iplt entry. */
8309 st_type = STT_FUNC;
8310 value = (splt->output_section->vma
8311 + splt->output_offset
8312 + plt_offset);
8313 branch_type = ST_BRANCH_TO_ARM;
8314
8315 /* If there are non-call relocations that resolve to the .iplt
8316 entry, then all dynamic ones must too. */
8317 if (arm_plt->noncall_refcount != 0)
8318 {
8319 dynreloc_st_type = st_type;
8320 dynreloc_value = value;
8321 }
8322 }
8323 else
8324 /* We populate the .plt entry in finish_dynamic_symbol. */
8325 splt = globals->root.splt;
8326 }
8327 else
8328 {
8329 splt = NULL;
8330 plt_offset = (bfd_vma) -1;
8331 gotplt_offset = (bfd_vma) -1;
8332 }
8333
8334 switch (r_type)
8335 {
8336 case R_ARM_NONE:
8337 /* We don't need to find a value for this symbol. It's just a
8338 marker. */
8339 *unresolved_reloc_p = FALSE;
8340 return bfd_reloc_ok;
8341
8342 case R_ARM_ABS12:
8343 if (!globals->vxworks_p)
8344 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8345
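      /* On VxWorks, R_ARM_ABS12 may need a dynamic relocation, so fall
	 through to the shared handling below.  */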
8346 case R_ARM_PC24:
8347 case R_ARM_ABS32:
8348 case R_ARM_ABS32_NOI:
8349 case R_ARM_REL32:
8350 case R_ARM_REL32_NOI:
8351 case R_ARM_CALL:
8352 case R_ARM_JUMP24:
8353 case R_ARM_XPC25:
8354 case R_ARM_PREL31:
8355 case R_ARM_PLT32:
8356 /* Handle relocations which should use the PLT entry. ABS32/REL32
8357 will use the symbol's value, which may point to a PLT entry, but we
8358 don't need to handle that here. If we created a PLT entry, all
8359 branches in this object should go to it, except if the PLT is too
8360 far away, in which case a long branch stub should be inserted. */
8361 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8362 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8363 && r_type != R_ARM_CALL
8364 && r_type != R_ARM_JUMP24
8365 && r_type != R_ARM_PLT32)
8366 && plt_offset != (bfd_vma) -1)
8367 {
8368 /* If we've created a .plt section, and assigned a PLT entry
8369 to this function, it must either be a STT_GNU_IFUNC reference
8370 or not be known to bind locally. In other cases, we should
8371 have cleared the PLT entry by now. */
8372 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8373
8374 value = (splt->output_section->vma
8375 + splt->output_offset
8376 + plt_offset);
8377 *unresolved_reloc_p = FALSE;
8378 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8379 contents, rel->r_offset, value,
8380 rel->r_addend);
8381 }
8382
8383 /* When generating a shared object or relocatable executable, these
8384 relocations are copied into the output file to be resolved at
8385 run time. */
8386 if ((info->shared || globals->root.is_relocatable_executable)
8387 && (input_section->flags & SEC_ALLOC)
8388 && !(globals->vxworks_p
8389 && strcmp (input_section->output_section->name,
8390 ".tls_vars") == 0)
8391 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8392 || !SYMBOL_CALLS_LOCAL (info, h))
8393 && !(input_bfd == globals->stub_bfd
8394 && strstr (input_section->name, STUB_SUFFIX))
8395 && (h == NULL
8396 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8397 || h->root.type != bfd_link_hash_undefweak)
8398 && r_type != R_ARM_PC24
8399 && r_type != R_ARM_CALL
8400 && r_type != R_ARM_JUMP24
8401 && r_type != R_ARM_PREL31
8402 && r_type != R_ARM_PLT32)
8403 {
8404 Elf_Internal_Rela outrel;
8405 bfd_boolean skip, relocate;
8406
8407 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
8408 && !h->def_regular)
8409 {
8410 char *v = _("shared object");
8411
8412 if (info->executable)
8413 v = _("PIE executable");
8414
8415 (*_bfd_error_handler)
8416 (_("%B: relocation %s against external or undefined symbol `%s'"
8417 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
8418 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
8419 return bfd_reloc_notsupported;
8420 }
8421
8422 *unresolved_reloc_p = FALSE;
8423
8424 if (sreloc == NULL && globals->root.dynamic_sections_created)
8425 {
8426 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8427 ! globals->use_rel);
8428
8429 if (sreloc == NULL)
8430 return bfd_reloc_notsupported;
8431 }
8432
8433 skip = FALSE;
8434 relocate = FALSE;
8435
8436 outrel.r_addend = addend;
8437 outrel.r_offset =
8438 _bfd_elf_section_offset (output_bfd, info, input_section,
8439 rel->r_offset);
8440 if (outrel.r_offset == (bfd_vma) -1)
8441 skip = TRUE;
8442 else if (outrel.r_offset == (bfd_vma) -2)
8443 skip = TRUE, relocate = TRUE;
8444 outrel.r_offset += (input_section->output_section->vma
8445 + input_section->output_offset);
8446
8447 if (skip)
8448 memset (&outrel, 0, sizeof outrel);
8449 else if (h != NULL
8450 && h->dynindx != -1
8451 && (!info->shared
8452 || !SYMBOLIC_BIND (info, h)
8453 || !h->def_regular))
8454 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8455 else
8456 {
8457 int symbol;
8458
8459 /* This symbol is local, or marked to become local. */
8460 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8461 if (globals->symbian_p)
8462 {
8463 asection *osec;
8464
8465 	      /* On Symbian OS, the data segment and text segment
8466 can be relocated independently. Therefore, we
8467 must indicate the segment to which this
8468 relocation is relative. The BPABI allows us to
8469 use any symbol in the right segment; we just use
8470 the section symbol as it is convenient. (We
8471 cannot use the symbol given by "h" directly as it
8472 will not appear in the dynamic symbol table.)
8473
8474 Note that the dynamic linker ignores the section
8475 symbol value, so we don't subtract osec->vma
8476 from the emitted reloc addend. */
8477 if (sym_sec)
8478 osec = sym_sec->output_section;
8479 else
8480 osec = input_section->output_section;
8481 symbol = elf_section_data (osec)->dynindx;
8482 if (symbol == 0)
8483 {
8484 struct elf_link_hash_table *htab = elf_hash_table (info);
8485
8486 if ((osec->flags & SEC_READONLY) == 0
8487 && htab->data_index_section != NULL)
8488 osec = htab->data_index_section;
8489 else
8490 osec = htab->text_index_section;
8491 symbol = elf_section_data (osec)->dynindx;
8492 }
8493 BFD_ASSERT (symbol != 0);
8494 }
8495 else
8496 /* On SVR4-ish systems, the dynamic loader cannot
8497 relocate the text and data segments independently,
8498 so the symbol does not matter. */
8499 symbol = 0;
8500 if (dynreloc_st_type == STT_GNU_IFUNC)
8501 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8502 to the .iplt entry. Instead, every non-call reference
8503 must use an R_ARM_IRELATIVE relocation to obtain the
8504 correct run-time address. */
8505 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8506 else
8507 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8508 if (globals->use_rel)
8509 relocate = TRUE;
8510 else
8511 outrel.r_addend += dynreloc_value;
8512 }
8513
8514 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8515
8516 /* If this reloc is against an external symbol, we do not want to
8517 fiddle with the addend. Otherwise, we need to include the symbol
8518 value so that it becomes an addend for the dynamic reloc. */
8519 if (! relocate)
8520 return bfd_reloc_ok;
8521
8522 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8523 contents, rel->r_offset,
8524 dynreloc_value, (bfd_vma) 0);
8525 }
8526 else switch (r_type)
8527 {
8528 case R_ARM_ABS12:
8529 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8530
8531 case R_ARM_XPC25: /* Arm BLX instruction. */
8532 case R_ARM_CALL:
8533 case R_ARM_JUMP24:
8534 case R_ARM_PC24: /* Arm B/BL instruction. */
8535 case R_ARM_PLT32:
8536 {
8537 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8538
8539 if (r_type == R_ARM_XPC25)
8540 {
8541 /* Check for Arm calling Arm function. */
8542 /* FIXME: Should we translate the instruction into a BL
8543 instruction instead ? */
8544 if (branch_type != ST_BRANCH_TO_THUMB)
8545 (*_bfd_error_handler)
8546 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8547 input_bfd,
8548 h ? h->root.root.string : "(local)");
8549 }
8550 else if (r_type == R_ARM_PC24)
8551 {
8552 /* Check for Arm calling Thumb function. */
8553 if (branch_type == ST_BRANCH_TO_THUMB)
8554 {
8555 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8556 output_bfd, input_section,
8557 hit_data, sym_sec, rel->r_offset,
8558 signed_addend, value,
8559 error_message))
8560 return bfd_reloc_ok;
8561 else
8562 return bfd_reloc_dangerous;
8563 }
8564 }
8565
8566 /* Check if a stub has to be inserted because the
8567 destination is too far or we are changing mode. */
8568 if ( r_type == R_ARM_CALL
8569 || r_type == R_ARM_JUMP24
8570 || r_type == R_ARM_PLT32)
8571 {
8572 enum elf32_arm_stub_type stub_type = arm_stub_none;
8573 struct elf32_arm_link_hash_entry *hash;
8574
8575 hash = (struct elf32_arm_link_hash_entry *) h;
8576 stub_type = arm_type_of_stub (info, input_section, rel,
8577 st_type, &branch_type,
8578 hash, value, sym_sec,
8579 input_bfd, sym_name);
8580
8581 if (stub_type != arm_stub_none)
8582 {
8583 /* The target is out of reach, so redirect the
8584 branch to the local stub for this function. */
8585 stub_entry = elf32_arm_get_stub_entry (input_section,
8586 sym_sec, h,
8587 rel, globals,
8588 stub_type);
8589 {
8590 if (stub_entry != NULL)
8591 value = (stub_entry->stub_offset
8592 + stub_entry->stub_sec->output_offset
8593 + stub_entry->stub_sec->output_section->vma);
8594
8595 if (plt_offset != (bfd_vma) -1)
8596 *unresolved_reloc_p = FALSE;
8597 }
8598 }
8599 else
8600 {
8601 /* If the call goes through a PLT entry, make sure to
8602 check distance to the right destination address. */
8603 if (plt_offset != (bfd_vma) -1)
8604 {
8605 value = (splt->output_section->vma
8606 + splt->output_offset
8607 + plt_offset);
8608 *unresolved_reloc_p = FALSE;
8609 /* The PLT entry is in ARM mode, regardless of the
8610 target function. */
8611 branch_type = ST_BRANCH_TO_ARM;
8612 }
8613 }
8614 }
8615
8616 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8617 where:
8618 S is the address of the symbol in the relocation.
8619 P is address of the instruction being relocated.
8620 A is the addend (extracted from the instruction) in bytes.
8621
8622 S is held in 'value'.
8623 P is the base address of the section containing the
8624 instruction plus the offset of the reloc into that
8625 section, ie:
8626 (input_section->output_section->vma +
8627 input_section->output_offset +
8628 rel->r_offset).
8629 A is the addend, converted into bytes, ie:
8630 (signed_addend * 4)
8631
8632 Note: None of these operations have knowledge of the pipeline
8633 size of the processor, thus it is up to the assembler to
8634 encode this information into the addend. */
8635 value -= (input_section->output_section->vma
8636 + input_section->output_offset);
8637 value -= rel->r_offset;
8638 if (globals->use_rel)
8639 value += (signed_addend << howto->size);
8640 else
8641 /* RELA addends do not have to be adjusted by howto->size. */
8642 value += signed_addend;
8643
8644 signed_addend = value;
8645 signed_addend >>= howto->rightshift;
8646
8647 /* A branch to an undefined weak symbol is turned into a jump to
8648 the next instruction unless a PLT entry will be created.
8649 Do the same for local undefined symbols (but not for STN_UNDEF).
8650 The jump to the next instruction is optimized as a NOP depending
8651 on the architecture. */
8652 if (h ? (h->root.type == bfd_link_hash_undefweak
8653 && plt_offset == (bfd_vma) -1)
8654 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8655 {
8656 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8657
8658 if (arch_has_arm_nop (globals))
8659 value |= 0x0320f000;
8660 else
8661 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8662 }
8663 else
8664 {
8665 /* Perform a signed range check. */
8666 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8667 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8668 return bfd_reloc_overflow;
8669
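	    /* Remember bit 1 of the branch offset; for a BLX instruction it
	       supplies the H (halfword) bit below.  */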
8670 addend = (value & 2);
8671
8672 value = (signed_addend & howto->dst_mask)
8673 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8674
8675 if (r_type == R_ARM_CALL)
8676 {
8677 /* Set the H bit in the BLX instruction. */
8678 if (branch_type == ST_BRANCH_TO_THUMB)
8679 {
8680 if (addend)
8681 value |= (1 << 24);
8682 else
8683 value &= ~(bfd_vma)(1 << 24);
8684 }
8685
8686 /* Select the correct instruction (BL or BLX). */
8687 /* Only if we are not handling a BL to a stub. In this
8688 case, mode switching is performed by the stub. */
8689 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8690 value |= (1 << 28);
8691 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8692 {
8693 value &= ~(bfd_vma)(1 << 28);
8694 value |= (1 << 24);
8695 }
8696 }
8697 }
8698 }
8699 break;
8700
8701 case R_ARM_ABS32:
8702 value += addend;
8703 if (branch_type == ST_BRANCH_TO_THUMB)
8704 value |= 1;
8705 break;
8706
8707 case R_ARM_ABS32_NOI:
8708 value += addend;
8709 break;
8710
8711 case R_ARM_REL32:
8712 value += addend;
8713 if (branch_type == ST_BRANCH_TO_THUMB)
8714 value |= 1;
8715 value -= (input_section->output_section->vma
8716 + input_section->output_offset + rel->r_offset);
8717 break;
8718
8719 case R_ARM_REL32_NOI:
8720 value += addend;
8721 value -= (input_section->output_section->vma
8722 + input_section->output_offset + rel->r_offset);
8723 break;
8724
8725 case R_ARM_PREL31:
8726 value -= (input_section->output_section->vma
8727 + input_section->output_offset + rel->r_offset);
8728 value += signed_addend;
8729 if (! h || h->root.type != bfd_link_hash_undefweak)
8730 {
8731 	  /* Check for overflow: the value must fit in a signed 31-bit field,
8731 	     i.e. bits 30 and 31 must be equal.  */
8732 if ((value ^ (value >> 1)) & (1 << 30))
8733 return bfd_reloc_overflow;
8734 }
8735 value &= 0x7fffffff;
8736 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8737 if (branch_type == ST_BRANCH_TO_THUMB)
8738 value |= 1;
8739 break;
8740 }
8741
8742 bfd_put_32 (input_bfd, value, hit_data);
8743 return bfd_reloc_ok;
8744
8745 case R_ARM_ABS8:
8746       /* PR 16202: Refetch the addend using the correct size.  */
8747 if (globals->use_rel)
8748 addend = bfd_get_8 (input_bfd, hit_data);
8749 value += addend;
8750
8751 /* There is no way to tell whether the user intended to use a signed or
8752 unsigned addend. When checking for overflow we accept either,
8753 as specified by the AAELF. */
8754 if ((long) value > 0xff || (long) value < -0x80)
8755 return bfd_reloc_overflow;
8756
8757 bfd_put_8 (input_bfd, value, hit_data);
8758 return bfd_reloc_ok;
8759
8760 case R_ARM_ABS16:
8761       /* PR 16202: Refetch the addend using the correct size.  */
8762 if (globals->use_rel)
8763 addend = bfd_get_16 (input_bfd, hit_data);
8764 value += addend;
8765
8766 /* See comment for R_ARM_ABS8. */
8767 if ((long) value > 0xffff || (long) value < -0x8000)
8768 return bfd_reloc_overflow;
8769
8770 bfd_put_16 (input_bfd, value, hit_data);
8771 return bfd_reloc_ok;
8772
8773 case R_ARM_THM_ABS5:
8774 /* Support ldr and str instructions for the thumb. */
8775 if (globals->use_rel)
8776 {
8777 /* Need to refetch addend. */
8778 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8779 /* ??? Need to determine shift amount from operand size. */
8780 addend >>= howto->rightshift;
8781 }
8782 value += addend;
8783
8784 /* ??? Isn't value unsigned? */
8785 if ((long) value > 0x1f || (long) value < -0x10)
8786 return bfd_reloc_overflow;
8787
8788 /* ??? Value needs to be properly shifted into place first. */
8789 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8790 bfd_put_16 (input_bfd, value, hit_data);
8791 return bfd_reloc_ok;
8792
8793 case R_ARM_THM_ALU_PREL_11_0:
8794 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8795 {
8796 bfd_vma insn;
8797 bfd_signed_vma relocation;
8798
8799 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8800 | bfd_get_16 (input_bfd, hit_data + 2);
8801
8802 if (globals->use_rel)
8803 {
8804 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8805 | ((insn & (1 << 26)) >> 15);
8806 if (insn & 0xf00000)
8807 signed_addend = -signed_addend;
8808 }
8809
8810 relocation = value + signed_addend;
8811 relocation -= Pa (input_section->output_section->vma
8812 + input_section->output_offset
8813 + rel->r_offset);
8814
8815 value = abs (relocation);
8816
8817 if (value >= 0x1000)
8818 return bfd_reloc_overflow;
8819
8820 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8821 | ((value & 0x700) << 4)
8822 | ((value & 0x800) << 15);
8823 if (relocation < 0)
8824 insn |= 0xa00000;
8825
8826 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8827 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8828
8829 return bfd_reloc_ok;
8830 }
8831
8832 case R_ARM_THM_PC8:
8833 /* PR 10073: This reloc is not generated by the GNU toolchain,
8834 but it is supported for compatibility with third party libraries
8835        generated by other compilers, specifically the ARM and IAR compilers.  */
8836 {
8837 bfd_vma insn;
8838 bfd_signed_vma relocation;
8839
8840 insn = bfd_get_16 (input_bfd, hit_data);
8841
8842 if (globals->use_rel)
8843 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
8844
8845 relocation = value + addend;
8846 relocation -= Pa (input_section->output_section->vma
8847 + input_section->output_offset
8848 + rel->r_offset);
8849
8850 value = abs (relocation);
8851
8852 /* We do not check for overflow of this reloc. Although strictly
8853 speaking this is incorrect, it appears to be necessary in order
8854 to work with IAR generated relocs. Since GCC and GAS do not
8855 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8856 a problem for them. */
8857 value &= 0x3fc;
8858
8859 insn = (insn & 0xff00) | (value >> 2);
8860
8861 bfd_put_16 (input_bfd, insn, hit_data);
8862
8863 return bfd_reloc_ok;
8864 }
8865
8866 case R_ARM_THM_PC12:
8867 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8868 {
8869 bfd_vma insn;
8870 bfd_signed_vma relocation;
8871
8872 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8873 | bfd_get_16 (input_bfd, hit_data + 2);
8874
8875 if (globals->use_rel)
8876 {
8877 signed_addend = insn & 0xfff;
8878 if (!(insn & (1 << 23)))
8879 signed_addend = -signed_addend;
8880 }
8881
8882 relocation = value + signed_addend;
8883 relocation -= Pa (input_section->output_section->vma
8884 + input_section->output_offset
8885 + rel->r_offset);
8886
8887 value = abs (relocation);
8888
8889 if (value >= 0x1000)
8890 return bfd_reloc_overflow;
8891
8892 insn = (insn & 0xff7ff000) | value;
8893 if (relocation >= 0)
8894 insn |= (1 << 23);
8895
8896 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8897 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8898
8899 return bfd_reloc_ok;
8900 }
8901
8902 case R_ARM_THM_XPC22:
8903 case R_ARM_THM_CALL:
8904 case R_ARM_THM_JUMP24:
8905 /* Thumb BL (branch long instruction). */
8906 {
8907 bfd_vma relocation;
8908 bfd_vma reloc_sign;
8909 bfd_boolean overflow = FALSE;
8910 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8911 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8912 bfd_signed_vma reloc_signed_max;
8913 bfd_signed_vma reloc_signed_min;
8914 bfd_vma check;
8915 bfd_signed_vma signed_check;
8916 int bitsize;
8917 const int thumb2 = using_thumb2 (globals);
8918
8919 /* A branch to an undefined weak symbol is turned into a jump to
8920 the next instruction unless a PLT entry will be created.
8921 The jump to the next instruction is optimized as a NOP.W for
8922 Thumb-2 enabled architectures. */
8923 if (h && h->root.type == bfd_link_hash_undefweak
8924 && plt_offset == (bfd_vma) -1)
8925 {
8926 if (arch_has_thumb2_nop (globals))
8927 {
8928 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8929 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8930 }
8931 else
8932 {
8933 bfd_put_16 (input_bfd, 0xe000, hit_data);
8934 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8935 }
8936 return bfd_reloc_ok;
8937 }
8938
8939 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8940 with Thumb-1) involving the J1 and J2 bits. */
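	/* The offset is SignExtend (S:I1:I2:imm10:imm11:'0'), where
	   I1 = NOT (J1 XOR S) and I2 = NOT (J2 XOR S), giving a 25-bit
	   signed, halfword-aligned value.  */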
8941 if (globals->use_rel)
8942 {
8943 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8944 bfd_vma upper = upper_insn & 0x3ff;
8945 bfd_vma lower = lower_insn & 0x7ff;
8946 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8947 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8948 bfd_vma i1 = j1 ^ s ? 0 : 1;
8949 bfd_vma i2 = j2 ^ s ? 0 : 1;
8950
8951 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8952 /* Sign extend. */
8953 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8954
8955 signed_addend = addend;
8956 }
8957
8958 if (r_type == R_ARM_THM_XPC22)
8959 {
8960 /* Check for Thumb to Thumb call. */
8961 /* FIXME: Should we translate the instruction into a BL
8962 instruction instead ? */
8963 if (branch_type == ST_BRANCH_TO_THUMB)
8964 (*_bfd_error_handler)
8965 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8966 input_bfd,
8967 h ? h->root.root.string : "(local)");
8968 }
8969 else
8970 {
8971 /* If it is not a call to Thumb, assume call to Arm.
8972 If it is a call relative to a section name, then it is not a
8973 function call at all, but rather a long jump. Calls through
8974 the PLT do not require stubs. */
8975 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8976 {
8977 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8978 {
8979 /* Convert BL to BLX. */
8980 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8981 }
8982 else if (( r_type != R_ARM_THM_CALL)
8983 && (r_type != R_ARM_THM_JUMP24))
8984 {
8985 if (elf32_thumb_to_arm_stub
8986 (info, sym_name, input_bfd, output_bfd, input_section,
8987 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8988 error_message))
8989 return bfd_reloc_ok;
8990 else
8991 return bfd_reloc_dangerous;
8992 }
8993 }
8994 else if (branch_type == ST_BRANCH_TO_THUMB
8995 && globals->use_blx
8996 && r_type == R_ARM_THM_CALL)
8997 {
8998 /* Make sure this is a BL. */
8999 lower_insn |= 0x1800;
9000 }
9001 }
9002
9003 enum elf32_arm_stub_type stub_type = arm_stub_none;
9004 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9005 {
9006 /* Check if a stub has to be inserted because the destination
9007 is too far. */
9008 struct elf32_arm_stub_hash_entry *stub_entry;
9009 struct elf32_arm_link_hash_entry *hash;
9010
9011 hash = (struct elf32_arm_link_hash_entry *) h;
9012
9013 stub_type = arm_type_of_stub (info, input_section, rel,
9014 st_type, &branch_type,
9015 hash, value, sym_sec,
9016 input_bfd, sym_name);
9017
9018 if (stub_type != arm_stub_none)
9019 {
9020 /* The target is out of reach or we are changing modes, so
9021 redirect the branch to the local stub for this
9022 function. */
9023 stub_entry = elf32_arm_get_stub_entry (input_section,
9024 sym_sec, h,
9025 rel, globals,
9026 stub_type);
9027 if (stub_entry != NULL)
9028 {
9029 value = (stub_entry->stub_offset
9030 + stub_entry->stub_sec->output_offset
9031 + stub_entry->stub_sec->output_section->vma);
9032
9033 if (plt_offset != (bfd_vma) -1)
9034 *unresolved_reloc_p = FALSE;
9035 }
9036
9037 /* If this call becomes a call to Arm, force BLX. */
9038 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9039 {
9040 if ((stub_entry
9041 && !arm_stub_is_thumb (stub_entry->stub_type))
9042 || branch_type != ST_BRANCH_TO_THUMB)
9043 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9044 }
9045 }
9046 }
9047
9048 /* Handle calls via the PLT. */
9049 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9050 {
9051 value = (splt->output_section->vma
9052 + splt->output_offset
9053 + plt_offset);
9054
9055 if (globals->use_blx
9056 && r_type == R_ARM_THM_CALL
9057 && ! using_thumb_only (globals))
9058 {
9059 /* If the Thumb BLX instruction is available, convert
9060 the BL to a BLX instruction to call the ARM-mode
9061 PLT entry. */
9062 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9063 branch_type = ST_BRANCH_TO_ARM;
9064 }
9065 else
9066 {
9067 if (! using_thumb_only (globals))
9068 /* Target the Thumb stub before the ARM PLT entry. */
9069 value -= PLT_THUMB_STUB_SIZE;
9070 branch_type = ST_BRANCH_TO_THUMB;
9071 }
9072 *unresolved_reloc_p = FALSE;
9073 }
9074
9075 relocation = value + signed_addend;
9076
9077 relocation -= (input_section->output_section->vma
9078 + input_section->output_offset
9079 + rel->r_offset);
9080
9081 check = relocation >> howto->rightshift;
9082
9083 /* If this is a signed value, the rightshift just dropped
9084 	   leading 1 bits (assuming two's complement).  */
9085 if ((bfd_signed_vma) relocation >= 0)
9086 signed_check = check;
9087 else
9088 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
9089
9090 	/* Calculate the permissible maximum and minimum values for
9091 this relocation according to whether we're relocating for
9092 Thumb-2 or not. */
9093 bitsize = howto->bitsize;
9094 if (!thumb2)
9095 bitsize -= 2;
9096 reloc_signed_max = (1 << (bitsize - 1)) - 1;
9097 reloc_signed_min = ~reloc_signed_max;
9098
9099 /* Assumes two's complement. */
9100 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9101 overflow = TRUE;
9102
9103 if ((lower_insn & 0x5000) == 0x4000)
9104 /* For a BLX instruction, make sure that the relocation is rounded up
9105 to a word boundary. This follows the semantics of the instruction
9106 which specifies that bit 1 of the target address will come from bit
9107 1 of the base address. */
9108 relocation = (relocation + 2) & ~ 3;
9109
9110 /* Put RELOCATION back into the insn. Assumes two's complement.
9111 We use the Thumb-2 encoding, which is safe even if dealing with
9112 a Thumb-1 instruction by virtue of our overflow check above. */
9113 reloc_sign = (signed_check < 0) ? 1 : 0;
9114 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
9115 | ((relocation >> 12) & 0x3ff)
9116 | (reloc_sign << 10);
9117 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
9118 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
9119 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
9120 | ((relocation >> 1) & 0x7ff);
9121
9122 /* Put the relocated value back in the object file: */
9123 bfd_put_16 (input_bfd, upper_insn, hit_data);
9124 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9125
9126 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9127 }
9128 break;
9129
9130 case R_ARM_THM_JUMP19:
9131 /* Thumb32 conditional branch instruction. */
9132 {
9133 bfd_vma relocation;
9134 bfd_boolean overflow = FALSE;
9135 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9136 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9137 bfd_signed_vma reloc_signed_max = 0xffffe;
9138 bfd_signed_vma reloc_signed_min = -0x100000;
9139 bfd_signed_vma signed_check;
9140 enum elf32_arm_stub_type stub_type = arm_stub_none;
9141 struct elf32_arm_stub_hash_entry *stub_entry;
9142 struct elf32_arm_link_hash_entry *hash;
9143
9144 /* Need to refetch the addend, reconstruct the top three bits,
9145 and squish the two 11 bit pieces together. */
9146 if (globals->use_rel)
9147 {
9148 bfd_vma S = (upper_insn & 0x0400) >> 10;
9149 bfd_vma upper = (upper_insn & 0x003f);
9150 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
9151 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
9152 bfd_vma lower = (lower_insn & 0x07ff);
9153
9154 upper |= J1 << 6;
9155 upper |= J2 << 7;
9156 upper |= (!S) << 8;
9157 upper -= 0x0100; /* Sign extend. */
9158
9159 addend = (upper << 12) | (lower << 1);
9160 signed_addend = addend;
9161 }
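        /* The reassembled field is S:J2:J1:imm6:imm11:'0', a 21-bit signed
           byte offset, which gives the +0xffffe / -0x100000 range checked
           against reloc_signed_max / reloc_signed_min below.  */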
9162
9163 /* Handle calls via the PLT. */
9164 if (plt_offset != (bfd_vma) -1)
9165 {
9166 value = (splt->output_section->vma
9167 + splt->output_offset
9168 + plt_offset);
9169 /* Target the Thumb stub before the ARM PLT entry. */
9170 value -= PLT_THUMB_STUB_SIZE;
9171 *unresolved_reloc_p = FALSE;
9172 }
9173
9174 hash = (struct elf32_arm_link_hash_entry *)h;
9175
9176 stub_type = arm_type_of_stub (info, input_section, rel,
9177 st_type, &branch_type,
9178 hash, value, sym_sec,
9179 input_bfd, sym_name);
9180 if (stub_type != arm_stub_none)
9181 {
9182 stub_entry = elf32_arm_get_stub_entry (input_section,
9183 sym_sec, h,
9184 rel, globals,
9185 stub_type);
9186 if (stub_entry != NULL)
9187 {
9188 value = (stub_entry->stub_offset
9189 + stub_entry->stub_sec->output_offset
9190 + stub_entry->stub_sec->output_section->vma);
9191 }
9192 }
9193
9194 relocation = value + signed_addend;
9195 relocation -= (input_section->output_section->vma
9196 + input_section->output_offset
9197 + rel->r_offset);
9198 signed_check = (bfd_signed_vma) relocation;
9199
9200 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9201 overflow = TRUE;
9202
9203 /* Put RELOCATION back into the insn. */
9204 {
9205 bfd_vma S = (relocation & 0x00100000) >> 20;
9206 bfd_vma J2 = (relocation & 0x00080000) >> 19;
9207 bfd_vma J1 = (relocation & 0x00040000) >> 18;
9208 bfd_vma hi = (relocation & 0x0003f000) >> 12;
9209 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
9210
9211 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
9212 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9213 }
9214
9215 /* Put the relocated value back in the object file: */
9216 bfd_put_16 (input_bfd, upper_insn, hit_data);
9217 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9218
9219 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9220 }
9221
9222 case R_ARM_THM_JUMP11:
9223 case R_ARM_THM_JUMP8:
9224 case R_ARM_THM_JUMP6:
9225       /* Thumb B (branch) instruction.  */
9226 {
9227 bfd_signed_vma relocation;
9228 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9229 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9230 bfd_signed_vma signed_check;
9231
9232 /* CZB cannot jump backward. */
9233 if (r_type == R_ARM_THM_JUMP6)
9234 reloc_signed_min = 0;
9235
9236 if (globals->use_rel)
9237 {
9238 /* Need to refetch addend. */
9239 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9240 if (addend & ((howto->src_mask + 1) >> 1))
9241 {
9242 signed_addend = -1;
9243 signed_addend &= ~ howto->src_mask;
9244 signed_addend |= addend;
9245 }
9246 else
9247 signed_addend = addend;
9248 /* The value in the insn has been right shifted. We need to
9249 undo this, so that we can perform the address calculation
9250 in terms of bytes. */
9251 signed_addend <<= howto->rightshift;
9252 }
9253 relocation = value + signed_addend;
9254
9255 relocation -= (input_section->output_section->vma
9256 + input_section->output_offset
9257 + rel->r_offset);
9258
9259 relocation >>= howto->rightshift;
9260 signed_check = relocation;
9261
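        /* For CB{N}Z (R_ARM_THM_JUMP6) the 6-bit halfword offset is
           scattered into the i bit (insn bit 9) and imm5 (insn bits 7:3);
           the other jump forms simply mask the offset into the
           destination field.  */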
9262 if (r_type == R_ARM_THM_JUMP6)
9263 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9264 else
9265 relocation &= howto->dst_mask;
9266 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9267
9268 bfd_put_16 (input_bfd, relocation, hit_data);
9269
9270 /* Assumes two's complement. */
9271 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9272 return bfd_reloc_overflow;
9273
9274 return bfd_reloc_ok;
9275 }
9276
9277 case R_ARM_ALU_PCREL7_0:
9278 case R_ARM_ALU_PCREL15_8:
9279 case R_ARM_ALU_PCREL23_15:
9280 {
9281 bfd_vma insn;
9282 bfd_vma relocation;
9283
9284 insn = bfd_get_32 (input_bfd, hit_data);
9285 if (globals->use_rel)
9286 {
9287 /* Extract the addend. */
9288 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9289 signed_addend = addend;
9290 }
9291 relocation = value + signed_addend;
9292
9293 relocation -= (input_section->output_section->vma
9294 + input_section->output_offset
9295 + rel->r_offset);
9296 insn = (insn & ~0xfff)
9297 | ((howto->bitpos << 7) & 0xf00)
9298 | ((relocation >> howto->bitpos) & 0xff);
9299 	bfd_put_32 (input_bfd, insn, hit_data);
9300 }
9301 return bfd_reloc_ok;
9302
9303 case R_ARM_GNU_VTINHERIT:
9304 case R_ARM_GNU_VTENTRY:
9305 return bfd_reloc_ok;
9306
9307 case R_ARM_GOTOFF32:
9308 /* Relocation is relative to the start of the
9309 global offset table. */
9310
9311 BFD_ASSERT (sgot != NULL);
9312 if (sgot == NULL)
9313 return bfd_reloc_notsupported;
9314
9315 /* If we are addressing a Thumb function, we need to adjust the
9316 address by one, so that attempts to call the function pointer will
9317 correctly interpret it as Thumb code. */
9318 if (branch_type == ST_BRANCH_TO_THUMB)
9319 value += 1;
9320
9321 /* Note that sgot->output_offset is not involved in this
9322 calculation. We always want the start of .got. If we
9323 define _GLOBAL_OFFSET_TABLE in a different way, as is
9324 permitted by the ABI, we might have to change this
9325 calculation. */
9326 value -= sgot->output_section->vma;
9327 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9328 contents, rel->r_offset, value,
9329 rel->r_addend);
9330
9331 case R_ARM_GOTPC:
9332 /* Use global offset table as symbol value. */
9333 BFD_ASSERT (sgot != NULL);
9334
9335 if (sgot == NULL)
9336 return bfd_reloc_notsupported;
9337
9338 *unresolved_reloc_p = FALSE;
9339 value = sgot->output_section->vma;
9340 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9341 contents, rel->r_offset, value,
9342 rel->r_addend);
9343
9344 case R_ARM_GOT32:
9345 case R_ARM_GOT_PREL:
9346 /* Relocation is to the entry for this symbol in the
9347 global offset table. */
9348 if (sgot == NULL)
9349 return bfd_reloc_notsupported;
9350
9351 if (dynreloc_st_type == STT_GNU_IFUNC
9352 && plt_offset != (bfd_vma) -1
9353 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9354 {
9355 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9356 symbol, and the relocation resolves directly to the runtime
9357 target rather than to the .iplt entry. This means that any
9358 .got entry would be the same value as the .igot.plt entry,
9359 so there's no point creating both. */
9360 sgot = globals->root.igotplt;
9361 value = sgot->output_offset + gotplt_offset;
9362 }
9363 else if (h != NULL)
9364 {
9365 bfd_vma off;
9366
9367 off = h->got.offset;
9368 BFD_ASSERT (off != (bfd_vma) -1);
9369 if ((off & 1) != 0)
9370 {
9371 	      /* We have already processed one GOT relocation against
9372 this symbol. */
9373 off &= ~1;
9374 if (globals->root.dynamic_sections_created
9375 && !SYMBOL_REFERENCES_LOCAL (info, h))
9376 *unresolved_reloc_p = FALSE;
9377 }
9378 else
9379 {
9380 Elf_Internal_Rela outrel;
9381
9382 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
9383 {
9384 /* If the symbol doesn't resolve locally in a static
9385 object, we have an undefined reference. If the
9386 symbol doesn't resolve locally in a dynamic object,
9387 it should be resolved by the dynamic linker. */
9388 if (globals->root.dynamic_sections_created)
9389 {
9390 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9391 *unresolved_reloc_p = FALSE;
9392 }
9393 else
9394 outrel.r_info = 0;
9395 outrel.r_addend = 0;
9396 }
9397 else
9398 {
9399 if (dynreloc_st_type == STT_GNU_IFUNC)
9400 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9401 else if (info->shared &&
9402 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9403 || h->root.type != bfd_link_hash_undefweak))
9404 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9405 else
9406 outrel.r_info = 0;
9407 outrel.r_addend = dynreloc_value;
9408 }
9409
9410 /* The GOT entry is initialized to zero by default.
9411 See if we should install a different value. */
9412 if (outrel.r_addend != 0
9413 && (outrel.r_info == 0 || globals->use_rel))
9414 {
9415 bfd_put_32 (output_bfd, outrel.r_addend,
9416 sgot->contents + off);
9417 outrel.r_addend = 0;
9418 }
9419
9420 if (outrel.r_info != 0)
9421 {
9422 outrel.r_offset = (sgot->output_section->vma
9423 + sgot->output_offset
9424 + off);
9425 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9426 }
9427 h->got.offset |= 1;
9428 }
9429 value = sgot->output_offset + off;
9430 }
9431 else
9432 {
9433 bfd_vma off;
9434
9435 BFD_ASSERT (local_got_offsets != NULL &&
9436 local_got_offsets[r_symndx] != (bfd_vma) -1);
9437
9438 off = local_got_offsets[r_symndx];
9439
9440 /* The offset must always be a multiple of 4. We use the
9441 least significant bit to record whether we have already
9442 generated the necessary reloc. */
9443 if ((off & 1) != 0)
9444 off &= ~1;
9445 else
9446 {
9447 if (globals->use_rel)
9448 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9449
9450 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9451 {
9452 Elf_Internal_Rela outrel;
9453
9454 outrel.r_addend = addend + dynreloc_value;
9455 outrel.r_offset = (sgot->output_section->vma
9456 + sgot->output_offset
9457 + off);
9458 if (dynreloc_st_type == STT_GNU_IFUNC)
9459 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9460 else
9461 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9462 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9463 }
9464
9465 local_got_offsets[r_symndx] |= 1;
9466 }
9467
9468 value = sgot->output_offset + off;
9469 }
9470 if (r_type != R_ARM_GOT32)
9471 value += sgot->output_section->vma;
9472
9473 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9474 contents, rel->r_offset, value,
9475 rel->r_addend);
9476
9477 case R_ARM_TLS_LDO32:
9478 value = value - dtpoff_base (info);
9479
9480 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9481 contents, rel->r_offset, value,
9482 rel->r_addend);
9483
9484 case R_ARM_TLS_LDM32:
9485 {
9486 bfd_vma off;
9487
9488 if (sgot == NULL)
9489 abort ();
9490
9491 off = globals->tls_ldm_got.offset;
9492
9493 if ((off & 1) != 0)
9494 off &= ~1;
9495 else
9496 {
9497 /* If we don't know the module number, create a relocation
9498 for it. */
9499 if (info->shared)
9500 {
9501 Elf_Internal_Rela outrel;
9502
9503 if (srelgot == NULL)
9504 abort ();
9505
9506 outrel.r_addend = 0;
9507 outrel.r_offset = (sgot->output_section->vma
9508 + sgot->output_offset + off);
9509 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9510
9511 if (globals->use_rel)
9512 bfd_put_32 (output_bfd, outrel.r_addend,
9513 sgot->contents + off);
9514
9515 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9516 }
9517 else
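              /* An executable or static link: the module index of the
                 main program is always 1, so we can store it directly.  */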
9518 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9519
9520 globals->tls_ldm_got.offset |= 1;
9521 }
9522
9523 value = sgot->output_section->vma + sgot->output_offset + off
9524 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9525
9526 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9527 contents, rel->r_offset, value,
9528 rel->r_addend);
9529 }
9530
9531 case R_ARM_TLS_CALL:
9532 case R_ARM_THM_TLS_CALL:
9533 case R_ARM_TLS_GD32:
9534 case R_ARM_TLS_IE32:
9535 case R_ARM_TLS_GOTDESC:
9536 case R_ARM_TLS_DESCSEQ:
9537 case R_ARM_THM_TLS_DESCSEQ:
9538 {
9539 bfd_vma off, offplt;
9540 int indx = 0;
9541 char tls_type;
9542
9543 BFD_ASSERT (sgot != NULL);
9544
9545 if (h != NULL)
9546 {
9547 bfd_boolean dyn;
9548 dyn = globals->root.dynamic_sections_created;
9549 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9550 && (!info->shared
9551 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9552 {
9553 *unresolved_reloc_p = FALSE;
9554 indx = h->dynindx;
9555 }
9556 off = h->got.offset;
9557 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9558 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9559 }
9560 else
9561 {
9562 BFD_ASSERT (local_got_offsets != NULL);
9563 off = local_got_offsets[r_symndx];
9564 offplt = local_tlsdesc_gotents[r_symndx];
9565 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9566 }
9567
9568 	/* Linker relaxation happens from one of the
9569 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9570 if (ELF32_R_TYPE(rel->r_info) != r_type)
9571 tls_type = GOT_TLS_IE;
9572
9573 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9574
9575 if ((off & 1) != 0)
9576 off &= ~1;
9577 else
9578 {
9579 bfd_boolean need_relocs = FALSE;
9580 Elf_Internal_Rela outrel;
9581 int cur_off = off;
9582
9583 /* The GOT entries have not been initialized yet. Do it
9584 now, and emit any relocations. If both an IE GOT and a
9585 GD GOT are necessary, we emit the GD first. */
9586
9587 if ((info->shared || indx != 0)
9588 && (h == NULL
9589 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9590 || h->root.type != bfd_link_hash_undefweak))
9591 {
9592 need_relocs = TRUE;
9593 BFD_ASSERT (srelgot != NULL);
9594 }
9595
9596 if (tls_type & GOT_TLS_GDESC)
9597 {
9598 bfd_byte *loc;
9599
9600 /* We should have relaxed, unless this is an undefined
9601 weak symbol. */
9602 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9603 || info->shared);
9604 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9605 <= globals->root.sgotplt->size);
9606
9607 outrel.r_addend = 0;
9608 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9609 + globals->root.sgotplt->output_offset
9610 + offplt
9611 + globals->sgotplt_jump_table_size);
9612
9613 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9614 sreloc = globals->root.srelplt;
9615 loc = sreloc->contents;
9616 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9617 BFD_ASSERT (loc + RELOC_SIZE (globals)
9618 <= sreloc->contents + sreloc->size);
9619
9620 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9621
9622 /* For globals, the first word in the relocation gets
9623 the relocation index and the top bit set, or zero,
9624 if we're binding now. For locals, it gets the
9625 symbol's offset in the tls section. */
9626 bfd_put_32 (output_bfd,
9627 !h ? value - elf_hash_table (info)->tls_sec->vma
9628 : info->flags & DF_BIND_NOW ? 0
9629 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9630 globals->root.sgotplt->contents + offplt
9631 + globals->sgotplt_jump_table_size);
9632
9633 /* Second word in the relocation is always zero. */
9634 bfd_put_32 (output_bfd, 0,
9635 globals->root.sgotplt->contents + offplt
9636 + globals->sgotplt_jump_table_size + 4);
9637 }
9638 if (tls_type & GOT_TLS_GD)
9639 {
9640 if (need_relocs)
9641 {
9642 outrel.r_addend = 0;
9643 outrel.r_offset = (sgot->output_section->vma
9644 + sgot->output_offset
9645 + cur_off);
9646 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9647
9648 if (globals->use_rel)
9649 bfd_put_32 (output_bfd, outrel.r_addend,
9650 sgot->contents + cur_off);
9651
9652 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9653
9654 if (indx == 0)
9655 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9656 sgot->contents + cur_off + 4);
9657 else
9658 {
9659 outrel.r_addend = 0;
9660 outrel.r_info = ELF32_R_INFO (indx,
9661 R_ARM_TLS_DTPOFF32);
9662 outrel.r_offset += 4;
9663
9664 if (globals->use_rel)
9665 bfd_put_32 (output_bfd, outrel.r_addend,
9666 sgot->contents + cur_off + 4);
9667
9668 elf32_arm_add_dynreloc (output_bfd, info,
9669 srelgot, &outrel);
9670 }
9671 }
9672 else
9673 {
9674 /* If we are not emitting relocations for a
9675 general dynamic reference, then we must be in a
9676 static link or an executable link with the
9677 symbol binding locally. Mark it as belonging
9678 to module 1, the executable. */
9679 bfd_put_32 (output_bfd, 1,
9680 sgot->contents + cur_off);
9681 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9682 sgot->contents + cur_off + 4);
9683 }
9684
9685 cur_off += 8;
9686 }
9687
9688 if (tls_type & GOT_TLS_IE)
9689 {
9690 if (need_relocs)
9691 {
9692 if (indx == 0)
9693 outrel.r_addend = value - dtpoff_base (info);
9694 else
9695 outrel.r_addend = 0;
9696 outrel.r_offset = (sgot->output_section->vma
9697 + sgot->output_offset
9698 + cur_off);
9699 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9700
9701 if (globals->use_rel)
9702 bfd_put_32 (output_bfd, outrel.r_addend,
9703 sgot->contents + cur_off);
9704
9705 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9706 }
9707 else
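              /* Static link, or the symbol binds locally in an executable:
                 the offset from the thread pointer is known at link time.  */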
9708 bfd_put_32 (output_bfd, tpoff (info, value),
9709 sgot->contents + cur_off);
9710 cur_off += 4;
9711 }
9712
9713 if (h != NULL)
9714 h->got.offset |= 1;
9715 else
9716 local_got_offsets[r_symndx] |= 1;
9717 }
9718
9719 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9720 off += 8;
9721 else if (tls_type & GOT_TLS_GDESC)
9722 off = offplt;
9723
9724 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9725 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9726 {
9727 bfd_signed_vma offset;
9728 	    /* TLS stubs are ARM mode.  The original symbol is a
9729 data object, so branch_type is bogus. */
9730 branch_type = ST_BRANCH_TO_ARM;
9731 enum elf32_arm_stub_type stub_type
9732 = arm_type_of_stub (info, input_section, rel,
9733 st_type, &branch_type,
9734 (struct elf32_arm_link_hash_entry *)h,
9735 globals->tls_trampoline, globals->root.splt,
9736 input_bfd, sym_name);
9737
9738 if (stub_type != arm_stub_none)
9739 {
9740 struct elf32_arm_stub_hash_entry *stub_entry
9741 = elf32_arm_get_stub_entry
9742 (input_section, globals->root.splt, 0, rel,
9743 globals, stub_type);
9744 offset = (stub_entry->stub_offset
9745 + stub_entry->stub_sec->output_offset
9746 + stub_entry->stub_sec->output_section->vma);
9747 }
9748 else
9749 offset = (globals->root.splt->output_section->vma
9750 + globals->root.splt->output_offset
9751 + globals->tls_trampoline);
9752
9753 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9754 {
9755 unsigned long inst;
9756
9757 offset -= (input_section->output_section->vma
9758 + input_section->output_offset
9759 + rel->r_offset + 8);
9760
9761 inst = offset >> 2;
9762 inst &= 0x00ffffff;
9763 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9764 }
9765 else
9766 {
9767 /* Thumb blx encodes the offset in a complicated
9768 fashion. */
9769 unsigned upper_insn, lower_insn;
9770 unsigned neg;
9771
9772 offset -= (input_section->output_section->vma
9773 + input_section->output_offset
9774 + rel->r_offset + 4);
9775
9776 if (stub_type != arm_stub_none
9777 && arm_stub_is_thumb (stub_type))
9778 {
9779 lower_insn = 0xd000;
9780 }
9781 else
9782 {
9783 lower_insn = 0xc000;
9784 /* Round up the offset to a word boundary. */
9785 offset = (offset + 2) & ~2;
9786 }
9787
9788 neg = offset < 0;
9789 upper_insn = (0xf000
9790 | ((offset >> 12) & 0x3ff)
9791 | (neg << 10));
9792 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9793 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9794 | ((offset >> 1) & 0x7ff);
9795 bfd_put_16 (input_bfd, upper_insn, hit_data);
9796 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9797 return bfd_reloc_ok;
9798 }
9799 }
9800 	/* This relocation needs special care: besides pointing
9801 	   somewhere in .gotplt, its addend must be adjusted
9802 	   according to the type of instruction that refers
9803 	   to it.  */
9804 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9805 {
9806 unsigned long data, insn;
9807 unsigned thumb;
9808
9809 data = bfd_get_32 (input_bfd, hit_data);
9810 thumb = data & 1;
9811 data &= ~1u;
9812
9813 if (thumb)
9814 {
9815 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9816 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9817 insn = (insn << 16)
9818 | bfd_get_16 (input_bfd,
9819 contents + rel->r_offset - data + 2);
9820 if ((insn & 0xf800c000) == 0xf000c000)
9821 /* bl/blx */
9822 value = -6;
9823 else if ((insn & 0xffffff00) == 0x4400)
9824 /* add */
9825 value = -5;
9826 else
9827 {
9828 (*_bfd_error_handler)
9829 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9830 input_bfd, input_section,
9831 (unsigned long)rel->r_offset, insn);
9832 return bfd_reloc_notsupported;
9833 }
9834 }
9835 else
9836 {
9837 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9838
9839 switch (insn >> 24)
9840 {
9841 case 0xeb: /* bl */
9842 case 0xfa: /* blx */
9843 value = -4;
9844 break;
9845
9846 case 0xe0: /* add */
9847 value = -8;
9848 break;
9849
9850 default:
9851 (*_bfd_error_handler)
9852 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9853 input_bfd, input_section,
9854 (unsigned long)rel->r_offset, insn);
9855 return bfd_reloc_notsupported;
9856 }
9857 }
9858
9859 value += ((globals->root.sgotplt->output_section->vma
9860 + globals->root.sgotplt->output_offset + off)
9861 - (input_section->output_section->vma
9862 + input_section->output_offset
9863 + rel->r_offset)
9864 + globals->sgotplt_jump_table_size);
9865 }
9866 else
9867 value = ((globals->root.sgot->output_section->vma
9868 + globals->root.sgot->output_offset + off)
9869 - (input_section->output_section->vma
9870 + input_section->output_offset + rel->r_offset));
9871
9872 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9873 contents, rel->r_offset, value,
9874 rel->r_addend);
9875 }
9876
9877 case R_ARM_TLS_LE32:
9878 if (info->shared && !info->pie)
9879 {
9880 (*_bfd_error_handler)
9881 	    (_("%B(%A+0x%lx): %s relocation not permitted in shared object"),
9882 input_bfd, input_section,
9883 (long) rel->r_offset, howto->name);
9884 return bfd_reloc_notsupported;
9885 }
9886 else
9887 value = tpoff (info, value);
9888
9889 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9890 contents, rel->r_offset, value,
9891 rel->r_addend);
9892
9893 case R_ARM_V4BX:
9894 if (globals->fix_v4bx)
9895 {
9896 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9897
9898 /* Ensure that we have a BX instruction. */
9899 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9900
9901 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9902 {
9903 /* Branch to veneer. */
9904 bfd_vma glue_addr;
9905 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9906 glue_addr -= input_section->output_section->vma
9907 + input_section->output_offset
9908 + rel->r_offset + 8;
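            /* Rewrite the BX as a conditional branch to the veneer, keeping
               the original condition code; the 8 above accounts for the ARM
               pipeline (PC reads as the instruction address plus 8).  */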
9909 insn = (insn & 0xf0000000) | 0x0a000000
9910 | ((glue_addr >> 2) & 0x00ffffff);
9911 }
9912 else
9913 {
9914 /* Preserve Rm (lowest four bits) and the condition code
9915 (highest four bits). Other bits encode MOV PC,Rm. */
9916 insn = (insn & 0xf000000f) | 0x01a0f000;
9917 }
9918
9919 bfd_put_32 (input_bfd, insn, hit_data);
9920 }
9921 return bfd_reloc_ok;
9922
9923 case R_ARM_MOVW_ABS_NC:
9924 case R_ARM_MOVT_ABS:
9925 case R_ARM_MOVW_PREL_NC:
9926 case R_ARM_MOVT_PREL:
9927       /* Until we properly support segment-base-relative addressing,
9928 we assume the segment base to be zero, as for the group relocations.
9929 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9930 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9931 case R_ARM_MOVW_BREL_NC:
9932 case R_ARM_MOVW_BREL:
9933 case R_ARM_MOVT_BREL:
9934 {
9935 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9936
9937 if (globals->use_rel)
9938 {
9939 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9940 signed_addend = (addend ^ 0x8000) - 0x8000;
9941 }
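        /* The ARM MOVW/MOVT immediate is split as imm4:imm12 (insn bits
           19:16 and 11:0); the XOR/subtract above sign-extends the
           reassembled 16-bit addend.  */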
9942
9943 value += signed_addend;
9944
9945 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9946 value -= (input_section->output_section->vma
9947 + input_section->output_offset + rel->r_offset);
9948
9949 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9950 return bfd_reloc_overflow;
9951
9952 if (branch_type == ST_BRANCH_TO_THUMB)
9953 value |= 1;
9954
9955 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9956 || r_type == R_ARM_MOVT_BREL)
9957 value >>= 16;
9958
9959 insn &= 0xfff0f000;
9960 insn |= value & 0xfff;
9961 insn |= (value & 0xf000) << 4;
9962 bfd_put_32 (input_bfd, insn, hit_data);
9963 }
9964 return bfd_reloc_ok;
9965
9966 case R_ARM_THM_MOVW_ABS_NC:
9967 case R_ARM_THM_MOVT_ABS:
9968 case R_ARM_THM_MOVW_PREL_NC:
9969 case R_ARM_THM_MOVT_PREL:
9970       /* Until we properly support segment-base-relative addressing,
9971 we assume the segment base to be zero, as for the above relocations.
9972 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9973 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9974 as R_ARM_THM_MOVT_ABS. */
9975 case R_ARM_THM_MOVW_BREL_NC:
9976 case R_ARM_THM_MOVW_BREL:
9977 case R_ARM_THM_MOVT_BREL:
9978 {
9979 bfd_vma insn;
9980
9981 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9982 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9983
9984 if (globals->use_rel)
9985 {
9986 addend = ((insn >> 4) & 0xf000)
9987 | ((insn >> 15) & 0x0800)
9988 | ((insn >> 4) & 0x0700)
9989 | (insn & 0x00ff);
9990 signed_addend = (addend ^ 0x8000) - 0x8000;
9991 }
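        /* In the Thumb-2 encoding the 16-bit immediate is scattered as
           imm4 (bits 19:16), i (bit 26), imm3 (bits 14:12) and imm8
           (bits 7:0) of the combined 32-bit instruction; the shifts above
           (and the stores below) gather and scatter those fields.  */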
9992
9993 value += signed_addend;
9994
9995 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9996 value -= (input_section->output_section->vma
9997 + input_section->output_offset + rel->r_offset);
9998
9999 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10000 return bfd_reloc_overflow;
10001
10002 if (branch_type == ST_BRANCH_TO_THUMB)
10003 value |= 1;
10004
10005 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10006 || r_type == R_ARM_THM_MOVT_BREL)
10007 value >>= 16;
10008
10009 insn &= 0xfbf08f00;
10010 insn |= (value & 0xf000) << 4;
10011 insn |= (value & 0x0800) << 15;
10012 insn |= (value & 0x0700) << 4;
10013 insn |= (value & 0x00ff);
10014
10015 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10016 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10017 }
10018 return bfd_reloc_ok;
10019
10020 case R_ARM_ALU_PC_G0_NC:
10021 case R_ARM_ALU_PC_G1_NC:
10022 case R_ARM_ALU_PC_G0:
10023 case R_ARM_ALU_PC_G1:
10024 case R_ARM_ALU_PC_G2:
10025 case R_ARM_ALU_SB_G0_NC:
10026 case R_ARM_ALU_SB_G1_NC:
10027 case R_ARM_ALU_SB_G0:
10028 case R_ARM_ALU_SB_G1:
10029 case R_ARM_ALU_SB_G2:
10030 {
10031 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10032 bfd_vma pc = input_section->output_section->vma
10033 + input_section->output_offset + rel->r_offset;
10034 /* sb is the origin of the *segment* containing the symbol. */
10035 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10036 bfd_vma residual;
10037 bfd_vma g_n;
10038 bfd_signed_vma signed_value;
10039 int group = 0;
10040
10041 /* Determine which group of bits to select. */
10042 switch (r_type)
10043 {
10044 case R_ARM_ALU_PC_G0_NC:
10045 case R_ARM_ALU_PC_G0:
10046 case R_ARM_ALU_SB_G0_NC:
10047 case R_ARM_ALU_SB_G0:
10048 group = 0;
10049 break;
10050
10051 case R_ARM_ALU_PC_G1_NC:
10052 case R_ARM_ALU_PC_G1:
10053 case R_ARM_ALU_SB_G1_NC:
10054 case R_ARM_ALU_SB_G1:
10055 group = 1;
10056 break;
10057
10058 case R_ARM_ALU_PC_G2:
10059 case R_ARM_ALU_SB_G2:
10060 group = 2;
10061 break;
10062
10063 default:
10064 abort ();
10065 }
10066
10067 /* If REL, extract the addend from the insn. If RELA, it will
10068 have already been fetched for us. */
10069 if (globals->use_rel)
10070 {
10071 int negative;
10072 bfd_vma constant = insn & 0xff;
10073 bfd_vma rotation = (insn & 0xf00) >> 8;
10074
10075 if (rotation == 0)
10076 signed_addend = constant;
10077 else
10078 {
10079 /* Compensate for the fact that in the instruction, the
10080 rotation is stored in multiples of 2 bits. */
10081 rotation *= 2;
10082
10083 /* Rotate "constant" right by "rotation" bits. */
10084 signed_addend = (constant >> rotation) |
10085 (constant << (8 * sizeof (bfd_vma) - rotation));
10086 }
10087
10088 /* Determine if the instruction is an ADD or a SUB.
10089 (For REL, this determines the sign of the addend.) */
10090 negative = identify_add_or_sub (insn);
10091 if (negative == 0)
10092 {
10093 (*_bfd_error_handler)
10094 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
10095 input_bfd, input_section,
10096 (long) rel->r_offset, howto->name);
10097 return bfd_reloc_overflow;
10098 }
10099
10100 signed_addend *= negative;
10101 }
10102
10103 /* Compute the value (X) to go in the place. */
10104 if (r_type == R_ARM_ALU_PC_G0_NC
10105 || r_type == R_ARM_ALU_PC_G1_NC
10106 || r_type == R_ARM_ALU_PC_G0
10107 || r_type == R_ARM_ALU_PC_G1
10108 || r_type == R_ARM_ALU_PC_G2)
10109 /* PC relative. */
10110 signed_value = value - pc + signed_addend;
10111 else
10112 /* Section base relative. */
10113 signed_value = value - sb + signed_addend;
10114
10115 /* If the target symbol is a Thumb function, then set the
10116 Thumb bit in the address. */
10117 if (branch_type == ST_BRANCH_TO_THUMB)
10118 signed_value |= 1;
10119
10120 /* Calculate the value of the relevant G_n, in encoded
10121 constant-with-rotation format. */
10122 g_n = calculate_group_reloc_mask (abs (signed_value), group,
10123 &residual);
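        /* For example, splitting the value 0x12345 yields G0 = 0x12000,
           G1 = 0x344 and G2 = 0x1: each G_n is the most significant
           remaining 8-bit chunk, aligned to an even bit position so that
           it can be encoded as an ARM rotated immediate.  */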
10124
10125 /* Check for overflow if required. */
10126 if ((r_type == R_ARM_ALU_PC_G0
10127 || r_type == R_ARM_ALU_PC_G1
10128 || r_type == R_ARM_ALU_PC_G2
10129 || r_type == R_ARM_ALU_SB_G0
10130 || r_type == R_ARM_ALU_SB_G1
10131 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
10132 {
10133 (*_bfd_error_handler)
10134 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10135 input_bfd, input_section,
10136 (long) rel->r_offset, abs (signed_value), howto->name);
10137 return bfd_reloc_overflow;
10138 }
10139
10140 /* Mask out the value and the ADD/SUB part of the opcode; take care
10141 not to destroy the S bit. */
10142 insn &= 0xff1ff000;
10143
10144 /* Set the opcode according to whether the value to go in the
10145 place is negative. */
10146 if (signed_value < 0)
10147 insn |= 1 << 22;
10148 else
10149 insn |= 1 << 23;
10150
10151 /* Encode the offset. */
10152 insn |= g_n;
10153
10154 bfd_put_32 (input_bfd, insn, hit_data);
10155 }
10156 return bfd_reloc_ok;
10157
10158 case R_ARM_LDR_PC_G0:
10159 case R_ARM_LDR_PC_G1:
10160 case R_ARM_LDR_PC_G2:
10161 case R_ARM_LDR_SB_G0:
10162 case R_ARM_LDR_SB_G1:
10163 case R_ARM_LDR_SB_G2:
10164 {
10165 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10166 bfd_vma pc = input_section->output_section->vma
10167 + input_section->output_offset + rel->r_offset;
10168 /* sb is the origin of the *segment* containing the symbol. */
10169 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10170 bfd_vma residual;
10171 bfd_signed_vma signed_value;
10172 int group = 0;
10173
10174 /* Determine which groups of bits to calculate. */
10175 switch (r_type)
10176 {
10177 case R_ARM_LDR_PC_G0:
10178 case R_ARM_LDR_SB_G0:
10179 group = 0;
10180 break;
10181
10182 case R_ARM_LDR_PC_G1:
10183 case R_ARM_LDR_SB_G1:
10184 group = 1;
10185 break;
10186
10187 case R_ARM_LDR_PC_G2:
10188 case R_ARM_LDR_SB_G2:
10189 group = 2;
10190 break;
10191
10192 default:
10193 abort ();
10194 }
10195
10196 /* If REL, extract the addend from the insn. If RELA, it will
10197 have already been fetched for us. */
10198 if (globals->use_rel)
10199 {
10200 int negative = (insn & (1 << 23)) ? 1 : -1;
10201 signed_addend = negative * (insn & 0xfff);
10202 }
10203
10204 /* Compute the value (X) to go in the place. */
10205 if (r_type == R_ARM_LDR_PC_G0
10206 || r_type == R_ARM_LDR_PC_G1
10207 || r_type == R_ARM_LDR_PC_G2)
10208 /* PC relative. */
10209 signed_value = value - pc + signed_addend;
10210 else
10211 /* Section base relative. */
10212 signed_value = value - sb + signed_addend;
10213
10214 /* Calculate the value of the relevant G_{n-1} to obtain
10215 the residual at that stage. */
10216 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
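        /* For the G0 variants group - 1 is -1, so nothing is peeled off
           and the whole value must fit in the 12-bit LDR offset; for
           G1/G2 the preceding group(s) are removed first and only the
           residual is encoded.  */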
10217
10218 /* Check for overflow. */
10219 if (residual >= 0x1000)
10220 {
10221 (*_bfd_error_handler)
10222 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10223 input_bfd, input_section,
10224 (long) rel->r_offset, abs (signed_value), howto->name);
10225 return bfd_reloc_overflow;
10226 }
10227
10228 /* Mask out the value and U bit. */
10229 insn &= 0xff7ff000;
10230
10231 /* Set the U bit if the value to go in the place is non-negative. */
10232 if (signed_value >= 0)
10233 insn |= 1 << 23;
10234
10235 /* Encode the offset. */
10236 insn |= residual;
10237
10238 bfd_put_32 (input_bfd, insn, hit_data);
10239 }
10240 return bfd_reloc_ok;
10241
10242 case R_ARM_LDRS_PC_G0:
10243 case R_ARM_LDRS_PC_G1:
10244 case R_ARM_LDRS_PC_G2:
10245 case R_ARM_LDRS_SB_G0:
10246 case R_ARM_LDRS_SB_G1:
10247 case R_ARM_LDRS_SB_G2:
10248 {
10249 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10250 bfd_vma pc = input_section->output_section->vma
10251 + input_section->output_offset + rel->r_offset;
10252 /* sb is the origin of the *segment* containing the symbol. */
10253 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10254 bfd_vma residual;
10255 bfd_signed_vma signed_value;
10256 int group = 0;
10257
10258 /* Determine which groups of bits to calculate. */
10259 switch (r_type)
10260 {
10261 case R_ARM_LDRS_PC_G0:
10262 case R_ARM_LDRS_SB_G0:
10263 group = 0;
10264 break;
10265
10266 case R_ARM_LDRS_PC_G1:
10267 case R_ARM_LDRS_SB_G1:
10268 group = 1;
10269 break;
10270
10271 case R_ARM_LDRS_PC_G2:
10272 case R_ARM_LDRS_SB_G2:
10273 group = 2;
10274 break;
10275
10276 default:
10277 abort ();
10278 }
10279
10280 /* If REL, extract the addend from the insn. If RELA, it will
10281 have already been fetched for us. */
10282 if (globals->use_rel)
10283 {
10284 int negative = (insn & (1 << 23)) ? 1 : -1;
10285 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10286 }
10287
10288 /* Compute the value (X) to go in the place. */
10289 if (r_type == R_ARM_LDRS_PC_G0
10290 || r_type == R_ARM_LDRS_PC_G1
10291 || r_type == R_ARM_LDRS_PC_G2)
10292 /* PC relative. */
10293 signed_value = value - pc + signed_addend;
10294 else
10295 /* Section base relative. */
10296 signed_value = value - sb + signed_addend;
10297
10298 /* Calculate the value of the relevant G_{n-1} to obtain
10299 the residual at that stage. */
10300 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10301
10302 /* Check for overflow. */
10303 if (residual >= 0x100)
10304 {
10305 (*_bfd_error_handler)
10306 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10307 input_bfd, input_section,
10308 (long) rel->r_offset, abs (signed_value), howto->name);
10309 return bfd_reloc_overflow;
10310 }
10311
10312 /* Mask out the value and U bit. */
10313 insn &= 0xff7ff0f0;
10314
10315 /* Set the U bit if the value to go in the place is non-negative. */
10316 if (signed_value >= 0)
10317 insn |= 1 << 23;
10318
10319 /* Encode the offset. */
10320 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10321
10322 bfd_put_32 (input_bfd, insn, hit_data);
10323 }
10324 return bfd_reloc_ok;
10325
10326 case R_ARM_LDC_PC_G0:
10327 case R_ARM_LDC_PC_G1:
10328 case R_ARM_LDC_PC_G2:
10329 case R_ARM_LDC_SB_G0:
10330 case R_ARM_LDC_SB_G1:
10331 case R_ARM_LDC_SB_G2:
10332 {
10333 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10334 bfd_vma pc = input_section->output_section->vma
10335 + input_section->output_offset + rel->r_offset;
10336 /* sb is the origin of the *segment* containing the symbol. */
10337 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10338 bfd_vma residual;
10339 bfd_signed_vma signed_value;
10340 int group = 0;
10341
10342 /* Determine which groups of bits to calculate. */
10343 switch (r_type)
10344 {
10345 case R_ARM_LDC_PC_G0:
10346 case R_ARM_LDC_SB_G0:
10347 group = 0;
10348 break;
10349
10350 case R_ARM_LDC_PC_G1:
10351 case R_ARM_LDC_SB_G1:
10352 group = 1;
10353 break;
10354
10355 case R_ARM_LDC_PC_G2:
10356 case R_ARM_LDC_SB_G2:
10357 group = 2;
10358 break;
10359
10360 default:
10361 abort ();
10362 }
10363
10364 /* If REL, extract the addend from the insn. If RELA, it will
10365 have already been fetched for us. */
10366 if (globals->use_rel)
10367 {
10368 int negative = (insn & (1 << 23)) ? 1 : -1;
10369 signed_addend = negative * ((insn & 0xff) << 2);
10370 }
10371
10372 /* Compute the value (X) to go in the place. */
10373 if (r_type == R_ARM_LDC_PC_G0
10374 || r_type == R_ARM_LDC_PC_G1
10375 || r_type == R_ARM_LDC_PC_G2)
10376 /* PC relative. */
10377 signed_value = value - pc + signed_addend;
10378 else
10379 /* Section base relative. */
10380 signed_value = value - sb + signed_addend;
10381
10382 /* Calculate the value of the relevant G_{n-1} to obtain
10383 the residual at that stage. */
10384 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10385
10386 /* Check for overflow. (The absolute value to go in the place must be
10387 divisible by four and, after having been divided by four, must
10388 fit in eight bits.) */
10389 if ((residual & 0x3) != 0 || residual >= 0x400)
10390 {
10391 (*_bfd_error_handler)
10392 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10393 input_bfd, input_section,
10394 (long) rel->r_offset, abs (signed_value), howto->name);
10395 return bfd_reloc_overflow;
10396 }
10397
10398 /* Mask out the value and U bit. */
10399 insn &= 0xff7fff00;
10400
10401 /* Set the U bit if the value to go in the place is non-negative. */
10402 if (signed_value >= 0)
10403 insn |= 1 << 23;
10404
10405 /* Encode the offset. */
10406 insn |= residual >> 2;
10407
10408 bfd_put_32 (input_bfd, insn, hit_data);
10409 }
10410 return bfd_reloc_ok;
10411
10412 default:
10413 return bfd_reloc_notsupported;
10414 }
10415 }
10416
10417 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
10418 static void
10419 arm_add_to_rel (bfd * abfd,
10420 bfd_byte * address,
10421 reloc_howto_type * howto,
10422 bfd_signed_vma increment)
10423 {
10424 bfd_signed_vma addend;
10425
10426 if (howto->type == R_ARM_THM_CALL
10427 || howto->type == R_ARM_THM_JUMP24)
10428 {
10429 int upper_insn, lower_insn;
10430 int upper, lower;
10431
10432 upper_insn = bfd_get_16 (abfd, address);
10433 lower_insn = bfd_get_16 (abfd, address + 2);
10434 upper = upper_insn & 0x7ff;
10435 lower = lower_insn & 0x7ff;
10436
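      /* Reassemble the branch offset in bytes from the two 11-bit
         fields, add INCREMENT (a byte value), then split it back in
         halfword units; the S/J1/J2 bits are left untouched.  */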
10437 addend = (upper << 12) | (lower << 1);
10438 addend += increment;
10439 addend >>= 1;
10440
10441 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10442 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10443
10444 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10445 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
10446 }
10447 else
10448 {
10449 bfd_vma contents;
10450
10451 contents = bfd_get_32 (abfd, address);
10452
10453 /* Get the (signed) value from the instruction. */
10454 addend = contents & howto->src_mask;
10455 if (addend & ((howto->src_mask + 1) >> 1))
10456 {
10457 bfd_signed_vma mask;
10458
10459 mask = -1;
10460 mask &= ~ howto->src_mask;
10461 addend |= mask;
10462 }
10463
10464       /* Add in the increment (which is a byte value).  */
10465 switch (howto->type)
10466 {
10467 default:
10468 addend += increment;
10469 break;
10470
10471 case R_ARM_PC24:
10472 case R_ARM_PLT32:
10473 case R_ARM_CALL:
10474 case R_ARM_JUMP24:
10475 addend <<= howto->size;
10476 addend += increment;
10477
10478 /* Should we check for overflow here ? */
10479
10480 /* Drop any undesired bits. */
10481 addend >>= howto->rightshift;
10482 break;
10483 }
10484
10485 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10486
10487 bfd_put_32 (abfd, contents, address);
10488 }
10489 }
10490
10491 #define IS_ARM_TLS_RELOC(R_TYPE) \
10492 ((R_TYPE) == R_ARM_TLS_GD32 \
10493 || (R_TYPE) == R_ARM_TLS_LDO32 \
10494 || (R_TYPE) == R_ARM_TLS_LDM32 \
10495 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10496 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10497 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10498 || (R_TYPE) == R_ARM_TLS_LE32 \
10499 || (R_TYPE) == R_ARM_TLS_IE32 \
10500 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10501
10502 /* Specific set of relocations for the gnu tls dialect. */
10503 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10504 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10505 || (R_TYPE) == R_ARM_TLS_CALL \
10506 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10507 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10508 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10509
10510 /* Relocate an ARM ELF section. */
10511
10512 static bfd_boolean
10513 elf32_arm_relocate_section (bfd * output_bfd,
10514 struct bfd_link_info * info,
10515 bfd * input_bfd,
10516 asection * input_section,
10517 bfd_byte * contents,
10518 Elf_Internal_Rela * relocs,
10519 Elf_Internal_Sym * local_syms,
10520 asection ** local_sections)
10521 {
10522 Elf_Internal_Shdr *symtab_hdr;
10523 struct elf_link_hash_entry **sym_hashes;
10524 Elf_Internal_Rela *rel;
10525 Elf_Internal_Rela *relend;
10526 const char *name;
10527 struct elf32_arm_link_hash_table * globals;
10528
10529 globals = elf32_arm_hash_table (info);
10530 if (globals == NULL)
10531 return FALSE;
10532
10533 symtab_hdr = & elf_symtab_hdr (input_bfd);
10534 sym_hashes = elf_sym_hashes (input_bfd);
10535
10536 rel = relocs;
10537 relend = relocs + input_section->reloc_count;
10538 for (; rel < relend; rel++)
10539 {
10540 int r_type;
10541 reloc_howto_type * howto;
10542 unsigned long r_symndx;
10543 Elf_Internal_Sym * sym;
10544 asection * sec;
10545 struct elf_link_hash_entry * h;
10546 bfd_vma relocation;
10547 bfd_reloc_status_type r;
10548 arelent bfd_reloc;
10549 char sym_type;
10550 bfd_boolean unresolved_reloc = FALSE;
10551 char *error_message = NULL;
10552
10553 r_symndx = ELF32_R_SYM (rel->r_info);
10554 r_type = ELF32_R_TYPE (rel->r_info);
10555 r_type = arm_real_reloc_type (globals, r_type);
10556
10557 if ( r_type == R_ARM_GNU_VTENTRY
10558 || r_type == R_ARM_GNU_VTINHERIT)
10559 continue;
10560
10561 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10562 howto = bfd_reloc.howto;
10563
10564 h = NULL;
10565 sym = NULL;
10566 sec = NULL;
10567
10568 if (r_symndx < symtab_hdr->sh_info)
10569 {
10570 sym = local_syms + r_symndx;
10571 sym_type = ELF32_ST_TYPE (sym->st_info);
10572 sec = local_sections[r_symndx];
10573
10574 /* An object file might have a reference to a local
10575 undefined symbol. This is a daft object file, but we
10576 should at least do something about it. V4BX & NONE
10577 relocations do not use the symbol and are explicitly
10578 allowed to use the undefined symbol, so allow those.
10579 Likewise for relocations against STN_UNDEF. */
10580 if (r_type != R_ARM_V4BX
10581 && r_type != R_ARM_NONE
10582 && r_symndx != STN_UNDEF
10583 && bfd_is_und_section (sec)
10584 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10585 {
10586 if (!info->callbacks->undefined_symbol
10587 (info, bfd_elf_string_from_elf_section
10588 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10589 input_bfd, input_section,
10590 rel->r_offset, TRUE))
10591 return FALSE;
10592 }
10593
10594 if (globals->use_rel)
10595 {
10596 relocation = (sec->output_section->vma
10597 + sec->output_offset
10598 + sym->st_value);
10599 if (!info->relocatable
10600 && (sec->flags & SEC_MERGE)
10601 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10602 {
10603 asection *msec;
10604 bfd_vma addend, value;
10605
10606 switch (r_type)
10607 {
10608 case R_ARM_MOVW_ABS_NC:
10609 case R_ARM_MOVT_ABS:
10610 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10611 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10612 addend = (addend ^ 0x8000) - 0x8000;
10613 break;
10614
10615 case R_ARM_THM_MOVW_ABS_NC:
10616 case R_ARM_THM_MOVT_ABS:
10617 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10618 << 16;
10619 value |= bfd_get_16 (input_bfd,
10620 contents + rel->r_offset + 2);
10621 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10622 | ((value & 0x04000000) >> 15);
10623 addend = (addend ^ 0x8000) - 0x8000;
10624 break;
10625
10626 default:
10627 if (howto->rightshift
10628 || (howto->src_mask & (howto->src_mask + 1)))
10629 {
10630 (*_bfd_error_handler)
10631 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10632 input_bfd, input_section,
10633 (long) rel->r_offset, howto->name);
10634 return FALSE;
10635 }
10636
10637 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10638
10639 /* Get the (signed) value from the instruction. */
10640 addend = value & howto->src_mask;
10641 if (addend & ((howto->src_mask + 1) >> 1))
10642 {
10643 bfd_signed_vma mask;
10644
10645 mask = -1;
10646 mask &= ~ howto->src_mask;
10647 addend |= mask;
10648 }
10649 break;
10650 }
10651
10652 msec = sec;
10653 addend =
10654 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10655 - relocation;
10656 addend += msec->output_section->vma + msec->output_offset;
10657
10658 /* Cases here must match those in the preceding
10659 switch statement. */
10660 switch (r_type)
10661 {
10662 case R_ARM_MOVW_ABS_NC:
10663 case R_ARM_MOVT_ABS:
10664 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10665 | (addend & 0xfff);
10666 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10667 break;
10668
10669 case R_ARM_THM_MOVW_ABS_NC:
10670 case R_ARM_THM_MOVT_ABS:
10671 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10672 | (addend & 0xff) | ((addend & 0x0800) << 15);
10673 bfd_put_16 (input_bfd, value >> 16,
10674 contents + rel->r_offset);
10675 bfd_put_16 (input_bfd, value,
10676 contents + rel->r_offset + 2);
10677 break;
10678
10679 default:
10680 value = (value & ~ howto->dst_mask)
10681 | (addend & howto->dst_mask);
10682 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10683 break;
10684 }
10685 }
10686 }
10687 else
10688 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
10689 }
10690 else
10691 {
10692 bfd_boolean warned, ignored;
10693
10694 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10695 r_symndx, symtab_hdr, sym_hashes,
10696 h, sec, relocation,
10697 unresolved_reloc, warned, ignored);
10698
10699 sym_type = h->type;
10700 }
10701
10702 if (sec != NULL && discarded_section (sec))
10703 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10704 rel, 1, relend, howto, 0, contents);
10705
10706 if (info->relocatable)
10707 {
10708 /* This is a relocatable link. We don't have to change
10709 anything, unless the reloc is against a section symbol,
10710 in which case we have to adjust according to where the
10711 section symbol winds up in the output section. */
10712 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10713 {
10714 if (globals->use_rel)
10715 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10716 howto, (bfd_signed_vma) sec->output_offset);
10717 else
10718 rel->r_addend += sec->output_offset;
10719 }
10720 continue;
10721 }
10722
10723 if (h != NULL)
10724 name = h->root.root.string;
10725 else
10726 {
10727 name = (bfd_elf_string_from_elf_section
10728 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10729 if (name == NULL || *name == '\0')
10730 name = bfd_section_name (input_bfd, sec);
10731 }
10732
10733 if (r_symndx != STN_UNDEF
10734 && r_type != R_ARM_NONE
10735 && (h == NULL
10736 || h->root.type == bfd_link_hash_defined
10737 || h->root.type == bfd_link_hash_defweak)
10738 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10739 {
10740 (*_bfd_error_handler)
10741 ((sym_type == STT_TLS
10742 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10743 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10744 input_bfd,
10745 input_section,
10746 (long) rel->r_offset,
10747 howto->name,
10748 name);
10749 }
10750
10751 /* We call elf32_arm_final_link_relocate unless we're completely
10752 done, i.e., the relaxation produced the final output we want,
10753 and we won't let anybody mess with it. Also, we have to do
10754 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
10755 both in relaxed and non-relaxed cases. */
10756 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10757 || (IS_ARM_TLS_GNU_RELOC (r_type)
10758 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10759 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10760 & GOT_TLS_GDESC)))
10761 {
10762 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10763 contents, rel, h == NULL);
10764 /* This may have been marked unresolved because it came from
10765 a shared library. But we've just dealt with that. */
10766 unresolved_reloc = 0;
10767 }
10768 else
10769 r = bfd_reloc_continue;
10770
10771 if (r == bfd_reloc_continue)
10772 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10773 input_section, contents, rel,
10774 relocation, info, sec, name, sym_type,
10775 (h ? h->target_internal
10776 : ARM_SYM_BRANCH_TYPE (sym)), h,
10777 &unresolved_reloc, &error_message);
10778
10779 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10780 because such sections are not SEC_ALLOC and thus ld.so will
10781 not process them. */
10782 if (unresolved_reloc
10783 && !((input_section->flags & SEC_DEBUGGING) != 0
10784 && h->def_dynamic)
10785 && _bfd_elf_section_offset (output_bfd, info, input_section,
10786 rel->r_offset) != (bfd_vma) -1)
10787 {
10788 (*_bfd_error_handler)
10789 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10790 input_bfd,
10791 input_section,
10792 (long) rel->r_offset,
10793 howto->name,
10794 h->root.root.string);
10795 return FALSE;
10796 }
10797
10798 if (r != bfd_reloc_ok)
10799 {
10800 switch (r)
10801 {
10802 case bfd_reloc_overflow:
10803 /* If the overflowing reloc was to an undefined symbol,
10804 we have already printed one error message and there
10805 is no point complaining again. */
10806 if ((! h ||
10807 h->root.type != bfd_link_hash_undefined)
10808 && (!((*info->callbacks->reloc_overflow)
10809 (info, (h ? &h->root : NULL), name, howto->name,
10810 (bfd_vma) 0, input_bfd, input_section,
10811 rel->r_offset))))
10812 return FALSE;
10813 break;
10814
10815 case bfd_reloc_undefined:
10816 if (!((*info->callbacks->undefined_symbol)
10817 (info, name, input_bfd, input_section,
10818 rel->r_offset, TRUE)))
10819 return FALSE;
10820 break;
10821
10822 case bfd_reloc_outofrange:
10823 error_message = _("out of range");
10824 goto common_error;
10825
10826 case bfd_reloc_notsupported:
10827 error_message = _("unsupported relocation");
10828 goto common_error;
10829
10830 case bfd_reloc_dangerous:
10831 /* error_message should already be set. */
10832 goto common_error;
10833
10834 default:
10835 error_message = _("unknown error");
10836 /* Fall through. */
10837
10838 common_error:
10839 BFD_ASSERT (error_message != NULL);
10840 if (!((*info->callbacks->reloc_dangerous)
10841 (info, error_message, input_bfd, input_section,
10842 rel->r_offset)))
10843 return FALSE;
10844 break;
10845 }
10846 }
10847 }
10848
10849 return TRUE;
10850 }
10851
10852 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10853 adds the edit to the start of the list. (The list must be built in order of
10854 ascending TINDEX: the function's callers are primarily responsible for
10855 maintaining that condition). */
10856
10857 static void
10858 add_unwind_table_edit (arm_unwind_table_edit **head,
10859 arm_unwind_table_edit **tail,
10860 arm_unwind_edit_type type,
10861 asection *linked_section,
10862 unsigned int tindex)
10863 {
10864 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10865 xmalloc (sizeof (arm_unwind_table_edit));
10866
10867 new_edit->type = type;
10868 new_edit->linked_section = linked_section;
10869 new_edit->index = tindex;
10870
10871 if (tindex > 0)
10872 {
10873 new_edit->next = NULL;
10874
10875 if (*tail)
10876 (*tail)->next = new_edit;
10877
10878 (*tail) = new_edit;
10879
10880 if (!*head)
10881 (*head) = new_edit;
10882 }
10883 else
10884 {
10885 new_edit->next = *head;
10886
10887 if (!*tail)
10888 *tail = new_edit;
10889
10890 *head = new_edit;
10891 }
10892 }
10893
10894 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10895
10896 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
10897 static void
10898 adjust_exidx_size(asection *exidx_sec, int adjust)
10899 {
10900 asection *out_sec;
10901
10902 if (!exidx_sec->rawsize)
10903 exidx_sec->rawsize = exidx_sec->size;
10904
10905 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10906 out_sec = exidx_sec->output_section;
10907 /* Adjust size of output section. */
10908   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
10909 }
10910
10911 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10912 static void
10913 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10914 {
10915 struct _arm_elf_section_data *exidx_arm_data;
10916
10917 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
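  /* Record an INSERT_EXIDX_CANTUNWIND_AT_END edit.  Passing UINT_MAX as the
     index keeps the edit at the tail of the edit list (see
     add_unwind_table_edit above); the marker itself occupies one 8-byte
     table entry, hence the size adjustment that follows.  */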
10918 add_unwind_table_edit (
10919 &exidx_arm_data->u.exidx.unwind_edit_list,
10920 &exidx_arm_data->u.exidx.unwind_edit_tail,
10921 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
10922
10923 adjust_exidx_size(exidx_sec, 8);
10924 }
10925
10926 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10927 made to those tables, such that:
10928
10929 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10930 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10931 codes which have been inlined into the index).
10932
10933 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10934
10935 The edits are applied when the tables are written
10936 (in elf32_arm_write_section). */
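/* Each .ARM.exidx table entry occupies two 32-bit words: the first refers
   to the start of the code region the entry covers, and the second is
   either the value 1 (an EXIDX_CANTUNWIND marker), an inlined unwind
   description with bit 31 set, or a reference to unwind data held in
   .ARM.extab.  The scan below classifies entries by their second word
   only, which is all that is needed to detect duplicates.  */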
10937
10938 bfd_boolean
10939 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10940 unsigned int num_text_sections,
10941 struct bfd_link_info *info,
10942 bfd_boolean merge_exidx_entries)
10943 {
10944 bfd *inp;
10945 unsigned int last_second_word = 0, i;
10946 asection *last_exidx_sec = NULL;
10947 asection *last_text_sec = NULL;
10948 int last_unwind_type = -1;
10949
10950   /* Walk over all EXIDX sections, and create backlinks from the corresponding
10951 text sections. */
10952 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
10953 {
10954 asection *sec;
10955
10956 for (sec = inp->sections; sec != NULL; sec = sec->next)
10957 {
10958 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10959 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10960
10961 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10962 continue;
10963
10964 if (elf_sec->linked_to)
10965 {
10966 Elf_Internal_Shdr *linked_hdr
10967 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10968 struct _arm_elf_section_data *linked_sec_arm_data
10969 = get_arm_elf_section_data (linked_hdr->bfd_section);
10970
10971 if (linked_sec_arm_data == NULL)
10972 continue;
10973
10974 /* Link this .ARM.exidx section back from the text section it
10975 describes. */
10976 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10977 }
10978 }
10979 }
10980
10981   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
10982 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10983 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10984
10985 for (i = 0; i < num_text_sections; i++)
10986 {
10987 asection *sec = text_section_order[i];
10988 asection *exidx_sec;
10989 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10990 struct _arm_elf_section_data *exidx_arm_data;
10991 bfd_byte *contents = NULL;
10992 int deleted_exidx_bytes = 0;
10993 bfd_vma j;
10994 arm_unwind_table_edit *unwind_edit_head = NULL;
10995 arm_unwind_table_edit *unwind_edit_tail = NULL;
10996 Elf_Internal_Shdr *hdr;
10997 bfd *ibfd;
10998
10999 if (arm_data == NULL)
11000 continue;
11001
11002 exidx_sec = arm_data->u.text.arm_exidx_sec;
11003 if (exidx_sec == NULL)
11004 {
11005 /* Section has no unwind data. */
11006 if (last_unwind_type == 0 || !last_exidx_sec)
11007 continue;
11008
11009 /* Ignore zero sized sections. */
11010 if (sec->size == 0)
11011 continue;
11012
11013 insert_cantunwind_after(last_text_sec, last_exidx_sec);
11014 last_unwind_type = 0;
11015 continue;
11016 }
11017
11018 /* Skip /DISCARD/ sections. */
11019 if (bfd_is_abs_section (exidx_sec->output_section))
11020 continue;
11021
11022 hdr = &elf_section_data (exidx_sec)->this_hdr;
11023 if (hdr->sh_type != SHT_ARM_EXIDX)
11024 continue;
11025
11026 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11027 if (exidx_arm_data == NULL)
11028 continue;
11029
11030 ibfd = exidx_sec->owner;
11031
11032 if (hdr->contents != NULL)
11033 contents = hdr->contents;
11034 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
11035 /* An error? */
11036 continue;
11037
11038 for (j = 0; j < hdr->sh_size; j += 8)
11039 {
11040 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
11041 int unwind_type;
11042 int elide = 0;
11043
11044 /* An EXIDX_CANTUNWIND entry. */
11045 if (second_word == 1)
11046 {
11047 if (last_unwind_type == 0)
11048 elide = 1;
11049 unwind_type = 0;
11050 }
11051 /* Inlined unwinding data. Merge if equal to previous. */
11052 else if ((second_word & 0x80000000) != 0)
11053 {
11054 if (merge_exidx_entries
11055 && last_second_word == second_word && last_unwind_type == 1)
11056 elide = 1;
11057 unwind_type = 1;
11058 last_second_word = second_word;
11059 }
11060 /* Normal table entry. In theory we could merge these too,
11061 but duplicate entries are likely to be much less common. */
11062 else
11063 unwind_type = 2;
11064
11065 if (elide)
11066 {
11067 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
11068 DELETE_EXIDX_ENTRY, NULL, j / 8);
11069
11070 deleted_exidx_bytes += 8;
11071 }
11072
11073 last_unwind_type = unwind_type;
11074 }
11075
11076 /* Free contents if we allocated it ourselves. */
11077 if (contents != hdr->contents)
11078 free (contents);
11079
11080 /* Record edits to be applied later (in elf32_arm_write_section). */
11081 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
11082 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
11083
11084 if (deleted_exidx_bytes > 0)
11085 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
11086
11087 last_exidx_sec = exidx_sec;
11088 last_text_sec = sec;
11089 }
11090
11091 /* Add terminating CANTUNWIND entry. */
11092 if (last_exidx_sec && last_unwind_type != 0)
11093 insert_cantunwind_after(last_text_sec, last_exidx_sec);
11094
11095 return TRUE;
11096 }
11097
11098 static bfd_boolean
11099 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
11100 bfd *ibfd, const char *name)
11101 {
11102 asection *sec, *osec;
11103
11104 sec = bfd_get_linker_section (ibfd, name);
11105 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
11106 return TRUE;
11107
11108 osec = sec->output_section;
11109 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
11110 return TRUE;
11111
11112 if (! bfd_set_section_contents (obfd, osec, sec->contents,
11113 sec->output_offset, sec->size))
11114 return FALSE;
11115
11116 return TRUE;
11117 }
11118
11119 static bfd_boolean
11120 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
11121 {
11122 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
11123 asection *sec, *osec;
11124
11125 if (globals == NULL)
11126 return FALSE;
11127
11128 /* Invoke the regular ELF backend linker to do all the work. */
11129 if (!bfd_elf_final_link (abfd, info))
11130 return FALSE;
11131
11132   /* Process stub sections (e.g. BE8 encoding, ...).  */
11133 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
11134 int i;
11135   for (i = 0; i < htab->top_id; i++)
11136 {
11137 sec = htab->stub_group[i].stub_sec;
11138 /* Only process it once, in its link_sec slot. */
11139 if (sec && i == htab->stub_group[i].link_sec->id)
11140 {
11141 osec = sec->output_section;
11142 elf32_arm_write_section (abfd, info, sec, sec->contents);
11143 if (! bfd_set_section_contents (abfd, osec, sec->contents,
11144 sec->output_offset, sec->size))
11145 return FALSE;
11146 }
11147 }
11148
11149 /* Write out any glue sections now that we have created all the
11150 stubs. */
11151 if (globals->bfd_of_glue_owner != NULL)
11152 {
11153 if (! elf32_arm_output_glue_section (info, abfd,
11154 globals->bfd_of_glue_owner,
11155 ARM2THUMB_GLUE_SECTION_NAME))
11156 return FALSE;
11157
11158 if (! elf32_arm_output_glue_section (info, abfd,
11159 globals->bfd_of_glue_owner,
11160 THUMB2ARM_GLUE_SECTION_NAME))
11161 return FALSE;
11162
11163 if (! elf32_arm_output_glue_section (info, abfd,
11164 globals->bfd_of_glue_owner,
11165 VFP11_ERRATUM_VENEER_SECTION_NAME))
11166 return FALSE;
11167
11168 if (! elf32_arm_output_glue_section (info, abfd,
11169 globals->bfd_of_glue_owner,
11170 ARM_BX_GLUE_SECTION_NAME))
11171 return FALSE;
11172 }
11173
11174 return TRUE;
11175 }
11176
11177 /* Return a best guess for the machine number based on the attributes. */
11178
11179 static unsigned int
11180 bfd_arm_get_mach_from_attributes (bfd * abfd)
11181 {
11182 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
11183
11184 switch (arch)
11185 {
11186 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
11187 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
11188 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
11189
11190 case TAG_CPU_ARCH_V5TE:
11191 {
11192 char * name;
11193
11194 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
11195 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
11196
11197 if (name)
11198 {
11199 if (strcmp (name, "IWMMXT2") == 0)
11200 return bfd_mach_arm_iWMMXt2;
11201
11202 if (strcmp (name, "IWMMXT") == 0)
11203 return bfd_mach_arm_iWMMXt;
11204
11205 if (strcmp (name, "XSCALE") == 0)
11206 {
11207 int wmmx;
11208
11209 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
11210 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
11211 switch (wmmx)
11212 {
11213 case 1: return bfd_mach_arm_iWMMXt;
11214 case 2: return bfd_mach_arm_iWMMXt2;
11215 default: return bfd_mach_arm_XScale;
11216 }
11217 }
11218 }
11219
11220 return bfd_mach_arm_5TE;
11221 }
11222
11223 default:
11224 return bfd_mach_arm_unknown;
11225 }
11226 }
11227
11228 /* Set the right machine number. */
11229
11230 static bfd_boolean
11231 elf32_arm_object_p (bfd *abfd)
11232 {
11233 unsigned int mach;
11234
11235 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11236
11237 if (mach == bfd_mach_arm_unknown)
11238 {
11239 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11240 mach = bfd_mach_arm_ep9312;
11241 else
11242 mach = bfd_arm_get_mach_from_attributes (abfd);
11243 }
11244
11245 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11246 return TRUE;
11247 }
11248
11249 /* Function to keep ARM specific flags in the ELF header. */
11250
11251 static bfd_boolean
11252 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
11253 {
11254 if (elf_flags_init (abfd)
11255 && elf_elfheader (abfd)->e_flags != flags)
11256 {
11257 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11258 {
11259 if (flags & EF_ARM_INTERWORK)
11260 (*_bfd_error_handler)
11261 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11262 abfd);
11263 else
11264 _bfd_error_handler
11265 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11266 abfd);
11267 }
11268 }
11269 else
11270 {
11271 elf_elfheader (abfd)->e_flags = flags;
11272 elf_flags_init (abfd) = TRUE;
11273 }
11274
11275 return TRUE;
11276 }
11277
11278 /* Copy backend specific data from one object module to another. */
11279
11280 static bfd_boolean
11281 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11282 {
11283 flagword in_flags;
11284 flagword out_flags;
11285
11286 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11287 return TRUE;
11288
11289 in_flags = elf_elfheader (ibfd)->e_flags;
11290 out_flags = elf_elfheader (obfd)->e_flags;
11291
11292 if (elf_flags_init (obfd)
11293 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11294 && in_flags != out_flags)
11295 {
11296 /* Cannot mix APCS26 and APCS32 code. */
11297 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11298 return FALSE;
11299
11300 /* Cannot mix float APCS and non-float APCS code. */
11301 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11302 return FALSE;
11303
11304 /* If the src and dest have different interworking flags
11305 then turn off the interworking bit. */
11306 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11307 {
11308 if (out_flags & EF_ARM_INTERWORK)
11309 _bfd_error_handler
11310 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11311 obfd, ibfd);
11312
11313 in_flags &= ~EF_ARM_INTERWORK;
11314 }
11315
11316 /* Likewise for PIC, though don't warn for this case. */
11317 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11318 in_flags &= ~EF_ARM_PIC;
11319 }
11320
11321 elf_elfheader (obfd)->e_flags = in_flags;
11322 elf_flags_init (obfd) = TRUE;
11323
11324 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
11325 }
11326
11327 /* Values for Tag_ABI_PCS_R9_use. */
11328 enum
11329 {
11330 AEABI_R9_V6,
11331 AEABI_R9_SB,
11332 AEABI_R9_TLS,
11333 AEABI_R9_unused
11334 };
11335
11336 /* Values for Tag_ABI_PCS_RW_data. */
11337 enum
11338 {
11339 AEABI_PCS_RW_data_absolute,
11340 AEABI_PCS_RW_data_PCrel,
11341 AEABI_PCS_RW_data_SBrel,
11342 AEABI_PCS_RW_data_unused
11343 };
11344
11345 /* Values for Tag_ABI_enum_size. */
11346 enum
11347 {
11348 AEABI_enum_unused,
11349 AEABI_enum_short,
11350 AEABI_enum_wide,
11351 AEABI_enum_forced_wide
11352 };
11353
11354 /* Determine whether an object attribute tag takes an integer, a
11355 string or both. */
11356
11357 static int
11358 elf32_arm_obj_attrs_arg_type (int tag)
11359 {
11360 if (tag == Tag_compatibility)
11361 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11362 else if (tag == Tag_nodefaults)
11363 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11364 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11365 return ATTR_TYPE_FLAG_STR_VAL;
11366 else if (tag < 32)
11367 return ATTR_TYPE_FLAG_INT_VAL;
11368 else
11369 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11370 }
11371
11372 /* The ABI defines that Tag_conformance should be emitted first, and that
11373 Tag_nodefaults should be second (if either is defined). This sets those
11374 two positions, and bumps up the position of all the remaining tags to
11375 compensate. */
11376 static int
11377 elf32_arm_obj_attrs_order (int num)
11378 {
11379 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11380 return Tag_conformance;
11381 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11382 return Tag_nodefaults;
11383 if ((num - 2) < Tag_nodefaults)
11384 return num - 2;
11385 if ((num - 1) < Tag_conformance)
11386 return num - 1;
11387 return num;
11388 }
11389
11390 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
11391 static bfd_boolean
11392 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11393 {
11394 if ((tag & 127) < 64)
11395 {
11396 _bfd_error_handler
11397 (_("%B: Unknown mandatory EABI object attribute %d"),
11398 abfd, tag);
11399 bfd_set_error (bfd_error_bad_value);
11400 return FALSE;
11401 }
11402 else
11403 {
11404 _bfd_error_handler
11405 (_("Warning: %B: Unknown EABI object attribute %d"),
11406 abfd, tag);
11407 return TRUE;
11408 }
11409 }
11410
11411 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11412 Returns -1 if no architecture could be read. */
11413
11414 static int
11415 get_secondary_compatible_arch (bfd *abfd)
11416 {
11417 obj_attribute *attr =
11418 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11419
11420 /* Note: the tag and its argument below are uleb128 values, though
11421 currently-defined values fit in one byte for each. */
11422 if (attr->s
11423 && attr->s[0] == Tag_CPU_arch
11424 && (attr->s[1] & 128) != 128
11425 && attr->s[2] == 0)
11426 return attr->s[1];
11427
11428 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11429 return -1;
11430 }
11431
11432 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11433 The tag is removed if ARCH is -1. */
11434
11435 static void
11436 set_secondary_compatible_arch (bfd *abfd, int arch)
11437 {
11438 obj_attribute *attr =
11439 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11440
11441 if (arch == -1)
11442 {
11443 attr->s = NULL;
11444 return;
11445 }
11446
11447 /* Note: the tag and its argument below are uleb128 values, though
11448 currently-defined values fit in one byte for each. */
11449 if (!attr->s)
11450 attr->s = (char *) bfd_alloc (abfd, 3);
11451 attr->s[0] = Tag_CPU_arch;
11452 attr->s[1] = arch;
11453 attr->s[2] = '\0';
11454 }
11455
11456 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
11457 into account. */
11458
11459 static int
11460 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11461 int newtag, int secondary_compat)
11462 {
11463 #define T(X) TAG_CPU_ARCH_##X
11464 int tagl, tagh, result;
11465 const int v6t2[] =
11466 {
11467 T(V6T2), /* PRE_V4. */
11468 T(V6T2), /* V4. */
11469 T(V6T2), /* V4T. */
11470 T(V6T2), /* V5T. */
11471 T(V6T2), /* V5TE. */
11472 T(V6T2), /* V5TEJ. */
11473 T(V6T2), /* V6. */
11474 T(V7), /* V6KZ. */
11475 T(V6T2) /* V6T2. */
11476 };
11477 const int v6k[] =
11478 {
11479 T(V6K), /* PRE_V4. */
11480 T(V6K), /* V4. */
11481 T(V6K), /* V4T. */
11482 T(V6K), /* V5T. */
11483 T(V6K), /* V5TE. */
11484 T(V6K), /* V5TEJ. */
11485 T(V6K), /* V6. */
11486 T(V6KZ), /* V6KZ. */
11487 T(V7), /* V6T2. */
11488 T(V6K) /* V6K. */
11489 };
11490 const int v7[] =
11491 {
11492 T(V7), /* PRE_V4. */
11493 T(V7), /* V4. */
11494 T(V7), /* V4T. */
11495 T(V7), /* V5T. */
11496 T(V7), /* V5TE. */
11497 T(V7), /* V5TEJ. */
11498 T(V7), /* V6. */
11499 T(V7), /* V6KZ. */
11500 T(V7), /* V6T2. */
11501 T(V7), /* V6K. */
11502 T(V7) /* V7. */
11503 };
11504 const int v6_m[] =
11505 {
11506 -1, /* PRE_V4. */
11507 -1, /* V4. */
11508 T(V6K), /* V4T. */
11509 T(V6K), /* V5T. */
11510 T(V6K), /* V5TE. */
11511 T(V6K), /* V5TEJ. */
11512 T(V6K), /* V6. */
11513 T(V6KZ), /* V6KZ. */
11514 T(V7), /* V6T2. */
11515 T(V6K), /* V6K. */
11516 T(V7), /* V7. */
11517 T(V6_M) /* V6_M. */
11518 };
11519 const int v6s_m[] =
11520 {
11521 -1, /* PRE_V4. */
11522 -1, /* V4. */
11523 T(V6K), /* V4T. */
11524 T(V6K), /* V5T. */
11525 T(V6K), /* V5TE. */
11526 T(V6K), /* V5TEJ. */
11527 T(V6K), /* V6. */
11528 T(V6KZ), /* V6KZ. */
11529 T(V7), /* V6T2. */
11530 T(V6K), /* V6K. */
11531 T(V7), /* V7. */
11532 T(V6S_M), /* V6_M. */
11533 T(V6S_M) /* V6S_M. */
11534 };
11535 const int v7e_m[] =
11536 {
11537 -1, /* PRE_V4. */
11538 -1, /* V4. */
11539 T(V7E_M), /* V4T. */
11540 T(V7E_M), /* V5T. */
11541 T(V7E_M), /* V5TE. */
11542 T(V7E_M), /* V5TEJ. */
11543 T(V7E_M), /* V6. */
11544 T(V7E_M), /* V6KZ. */
11545 T(V7E_M), /* V6T2. */
11546 T(V7E_M), /* V6K. */
11547 T(V7E_M), /* V7. */
11548 T(V7E_M), /* V6_M. */
11549 T(V7E_M), /* V6S_M. */
11550 T(V7E_M) /* V7E_M. */
11551 };
11552 const int v8[] =
11553 {
11554 T(V8), /* PRE_V4. */
11555 T(V8), /* V4. */
11556 T(V8), /* V4T. */
11557 T(V8), /* V5T. */
11558 T(V8), /* V5TE. */
11559 T(V8), /* V5TEJ. */
11560 T(V8), /* V6. */
11561 T(V8), /* V6KZ. */
11562 T(V8), /* V6T2. */
11563 T(V8), /* V6K. */
11564 T(V8), /* V7. */
11565 T(V8), /* V6_M. */
11566 T(V8), /* V6S_M. */
11567 T(V8), /* V7E_M. */
11568 T(V8) /* V8. */
11569 };
11570 const int v4t_plus_v6_m[] =
11571 {
11572 -1, /* PRE_V4. */
11573 -1, /* V4. */
11574 T(V4T), /* V4T. */
11575 T(V5T), /* V5T. */
11576 T(V5TE), /* V5TE. */
11577 T(V5TEJ), /* V5TEJ. */
11578 T(V6), /* V6. */
11579 T(V6KZ), /* V6KZ. */
11580 T(V6T2), /* V6T2. */
11581 T(V6K), /* V6K. */
11582 T(V7), /* V7. */
11583 T(V6_M), /* V6_M. */
11584 T(V6S_M), /* V6S_M. */
11585 T(V7E_M), /* V7E_M. */
11586 T(V8), /* V8. */
11587 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11588 };
11589 const int *comb[] =
11590 {
11591 v6t2,
11592 v6k,
11593 v7,
11594 v6_m,
11595 v6s_m,
11596 v7e_m,
11597 v8,
11598 /* Pseudo-architecture. */
11599 v4t_plus_v6_m
11600 };
11601
11602 /* Check we've not got a higher architecture than we know about. */
11603
11604 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11605 {
11606 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11607 return -1;
11608 }
11609
11610 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11611
11612 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11613 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11614 oldtag = T(V4T_PLUS_V6_M);
11615
11616 /* And override the new tag if we have a Tag_also_compatible_with on the
11617 input. */
11618
11619 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11620 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11621 newtag = T(V4T_PLUS_V6_M);
11622
11623 tagl = (oldtag < newtag) ? oldtag : newtag;
11624 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11625
11626 /* Architectures before V6KZ add features monotonically. */
11627 if (tagh <= TAG_CPU_ARCH_V6KZ)
11628 return result;
11629
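  /* Above V6KZ the architecture variants diverge, so combine via the
     tables above: the row is selected by the higher tag and indexed by
     the lower one.  For example, combining V6T2 with V6K selects the v6k
     row and entry v6k[V6T2], i.e. T(V7).  */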
11630 result = comb[tagh - T(V6T2)][tagl];
11631
11632 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11633 as the canonical version. */
11634 if (result == T(V4T_PLUS_V6_M))
11635 {
11636 result = T(V4T);
11637 *secondary_compat_out = T(V6_M);
11638 }
11639 else
11640 *secondary_compat_out = -1;
11641
11642 if (result == -1)
11643 {
11644 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11645 ibfd, oldtag, newtag);
11646 return -1;
11647 }
11648
11649 return result;
11650 #undef T
11651 }
11652
11653 /* Query attributes object to see if integer divide instructions may be
11654 present in an object. */
11655 static bfd_boolean
11656 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11657 {
11658 int arch = attr[Tag_CPU_arch].i;
11659 int profile = attr[Tag_CPU_arch_profile].i;
11660
11661 switch (attr[Tag_DIV_use].i)
11662 {
11663 case 0:
11664       /* Integer divide allowed if the instruction is contained in the architecture.  */
11665 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11666 return TRUE;
11667 else if (arch >= TAG_CPU_ARCH_V7E_M)
11668 return TRUE;
11669 else
11670 return FALSE;
11671
11672 case 1:
11673 /* Integer divide explicitly prohibited. */
11674 return FALSE;
11675
11676 default:
11677 /* Unrecognised case - treat as allowing divide everywhere. */
11678 case 2:
11679 /* Integer divide allowed in ARM state. */
11680 return TRUE;
11681 }
11682 }
11683
11684 /* Query attributes object to see if integer divide instructions are
11685 forbidden to be in the object. This is not the inverse of
11686 elf32_arm_attributes_accept_div. */
11687 static bfd_boolean
11688 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11689 {
11690 return attr[Tag_DIV_use].i == 1;
11691 }
11692
11693 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11694 are conflicting attributes. */
11695
11696 static bfd_boolean
11697 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11698 {
11699 obj_attribute *in_attr;
11700 obj_attribute *out_attr;
11701 /* Some tags have 0 = don't care, 1 = strong requirement,
11702 2 = weak requirement. */
11703 static const int order_021[3] = {0, 2, 1};
11704 int i;
11705 bfd_boolean result = TRUE;
11706 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
11707
11708 /* Skip the linker stubs file. This preserves previous behavior
11709 of accepting unknown attributes in the first input file - but
11710 is that a bug? */
11711 if (ibfd->flags & BFD_LINKER_CREATED)
11712 return TRUE;
11713
11714   /* Skip any input that does not have an attribute section.
11715      This allows object files without an attribute section to be
11716      linked with any others.  */
11717 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
11718 return TRUE;
11719
11720 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11721 {
11722 /* This is the first object. Copy the attributes. */
11723 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11724
11725 out_attr = elf_known_obj_attributes_proc (obfd);
11726
11727 /* Use the Tag_null value to indicate the attributes have been
11728 initialized. */
11729 out_attr[0].i = 1;
11730
11731 /* We do not output objects with Tag_MPextension_use_legacy - we move
11732 the attribute's value to Tag_MPextension_use. */
11733 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11734 {
11735 if (out_attr[Tag_MPextension_use].i != 0
11736 && out_attr[Tag_MPextension_use_legacy].i
11737 != out_attr[Tag_MPextension_use].i)
11738 {
11739 _bfd_error_handler
11740 (_("Error: %B has both the current and legacy "
11741 "Tag_MPextension_use attributes"), ibfd);
11742 result = FALSE;
11743 }
11744
11745 out_attr[Tag_MPextension_use] =
11746 out_attr[Tag_MPextension_use_legacy];
11747 out_attr[Tag_MPextension_use_legacy].type = 0;
11748 out_attr[Tag_MPextension_use_legacy].i = 0;
11749 }
11750
11751 return result;
11752 }
11753
11754 in_attr = elf_known_obj_attributes_proc (ibfd);
11755 out_attr = elf_known_obj_attributes_proc (obfd);
11756 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11757 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11758 {
11759 /* Ignore mismatches if the object doesn't use floating point or is
11760 floating point ABI independent. */
11761 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
11762 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
11763 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
11764 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11765 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
11766 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
11767 {
11768 _bfd_error_handler
11769 (_("error: %B uses VFP register arguments, %B does not"),
11770 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11771 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
11772 result = FALSE;
11773 }
11774 }
11775
11776 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11777 {
11778 /* Merge this attribute with existing attributes. */
11779 switch (i)
11780 {
11781 case Tag_CPU_raw_name:
11782 case Tag_CPU_name:
11783 /* These are merged after Tag_CPU_arch. */
11784 break;
11785
11786 case Tag_ABI_optimization_goals:
11787 case Tag_ABI_FP_optimization_goals:
11788 /* Use the first value seen. */
11789 break;
11790
11791 case Tag_CPU_arch:
11792 {
11793 int secondary_compat = -1, secondary_compat_out = -1;
11794 unsigned int saved_out_attr = out_attr[i].i;
11795 int arch_attr;
11796 static const char *name_table[] =
11797 {
11798 /* These aren't real CPU names, but we can't guess
11799 that from the architecture version alone. */
11800 "Pre v4",
11801 "ARM v4",
11802 "ARM v4T",
11803 "ARM v5T",
11804 "ARM v5TE",
11805 "ARM v5TEJ",
11806 "ARM v6",
11807 "ARM v6KZ",
11808 "ARM v6T2",
11809 "ARM v6K",
11810 "ARM v7",
11811 "ARM v6-M",
11812 "ARM v6S-M",
11813 "ARM v8"
11814 };
11815
11816 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11817 secondary_compat = get_secondary_compatible_arch (ibfd);
11818 secondary_compat_out = get_secondary_compatible_arch (obfd);
11819 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11820 &secondary_compat_out,
11821 in_attr[i].i,
11822 secondary_compat);
11823
11824 /* Return with error if failed to merge. */
11825 if (arch_attr == -1)
11826 return FALSE;
11827
11828 out_attr[i].i = arch_attr;
11829
11830 set_secondary_compatible_arch (obfd, secondary_compat_out);
11831
11832 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11833 if (out_attr[i].i == saved_out_attr)
11834 ; /* Leave the names alone. */
11835 else if (out_attr[i].i == in_attr[i].i)
11836 {
11837 /* The output architecture has been changed to match the
11838 input architecture. Use the input names. */
11839 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11840 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11841 : NULL;
11842 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11843 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11844 : NULL;
11845 }
11846 else
11847 {
11848 out_attr[Tag_CPU_name].s = NULL;
11849 out_attr[Tag_CPU_raw_name].s = NULL;
11850 }
11851
11852 /* If we still don't have a value for Tag_CPU_name,
11853 make one up now. Tag_CPU_raw_name remains blank. */
11854 if (out_attr[Tag_CPU_name].s == NULL
11855 && out_attr[i].i < ARRAY_SIZE (name_table))
11856 out_attr[Tag_CPU_name].s =
11857 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11858 }
11859 break;
11860
11861 case Tag_ARM_ISA_use:
11862 case Tag_THUMB_ISA_use:
11863 case Tag_WMMX_arch:
11864 case Tag_Advanced_SIMD_arch:
11865 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11866 case Tag_ABI_FP_rounding:
11867 case Tag_ABI_FP_exceptions:
11868 case Tag_ABI_FP_user_exceptions:
11869 case Tag_ABI_FP_number_model:
11870 case Tag_FP_HP_extension:
11871 case Tag_CPU_unaligned_access:
11872 case Tag_T2EE_use:
11873 case Tag_MPextension_use:
11874 /* Use the largest value specified. */
11875 if (in_attr[i].i > out_attr[i].i)
11876 out_attr[i].i = in_attr[i].i;
11877 break;
11878
11879 case Tag_ABI_align_preserved:
11880 case Tag_ABI_PCS_RO_data:
11881 /* Use the smallest value specified. */
11882 if (in_attr[i].i < out_attr[i].i)
11883 out_attr[i].i = in_attr[i].i;
11884 break;
11885
11886 case Tag_ABI_align_needed:
11887 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11888 && (in_attr[Tag_ABI_align_preserved].i == 0
11889 || out_attr[Tag_ABI_align_preserved].i == 0))
11890 {
11891 /* This error message should be enabled once all non-conformant
11892 binaries in the toolchain have had the attributes set
11893 properly.
11894 _bfd_error_handler
11895 (_("error: %B: 8-byte data alignment conflicts with %B"),
11896 obfd, ibfd);
11897 result = FALSE; */
11898 }
11899 /* Fall through. */
11900 case Tag_ABI_FP_denormal:
11901 case Tag_ABI_PCS_GOT_use:
11902 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11903 value if greater than 2 (for future-proofing). */
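	  /* E.g. a strong requirement (1) merged with a weak one (2) gives 1,
	     because order_021[1] > order_021[2].  */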
11904 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11905 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11906 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11907 out_attr[i].i = in_attr[i].i;
11908 break;
11909
11910 case Tag_Virtualization_use:
11911 /* The virtualization tag effectively stores two bits of
11912 information: the intended use of TrustZone (in bit 0), and the
11913 intended use of Virtualization (in bit 1). */
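	  /* Hence merging a TrustZone-only input (1) with a
	     Virtualization-only output (2) produces 3, meaning both uses
	     are intended.  */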
11914 if (out_attr[i].i == 0)
11915 out_attr[i].i = in_attr[i].i;
11916 else if (in_attr[i].i != 0
11917 && in_attr[i].i != out_attr[i].i)
11918 {
11919 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11920 out_attr[i].i = 3;
11921 else
11922 {
11923 _bfd_error_handler
11924 (_("error: %B: unable to merge virtualization attributes "
11925 "with %B"),
11926 obfd, ibfd);
11927 result = FALSE;
11928 }
11929 }
11930 break;
11931
11932 case Tag_CPU_arch_profile:
11933 if (out_attr[i].i != in_attr[i].i)
11934 {
11935 /* 0 will merge with anything.
11936 'A' and 'S' merge to 'A'.
11937 'R' and 'S' merge to 'R'.
11938 'M' and 'A|R|S' is an error. */
11939 if (out_attr[i].i == 0
11940 || (out_attr[i].i == 'S'
11941 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11942 out_attr[i].i = in_attr[i].i;
11943 else if (in_attr[i].i == 0
11944 || (in_attr[i].i == 'S'
11945 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11946 ; /* Do nothing. */
11947 else
11948 {
11949 _bfd_error_handler
11950 (_("error: %B: Conflicting architecture profiles %c/%c"),
11951 ibfd,
11952 in_attr[i].i ? in_attr[i].i : '0',
11953 out_attr[i].i ? out_attr[i].i : '0');
11954 result = FALSE;
11955 }
11956 }
11957 break;
11958 case Tag_FP_arch:
11959 {
11960 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11961 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11962 when it's 0. It might mean absence of FP hardware if
11963 Tag_FP_arch is zero. */
11964
11965 #define VFP_VERSION_COUNT 9
11966 static const struct
11967 {
11968 int ver;
11969 int regs;
11970 } vfp_versions[VFP_VERSION_COUNT] =
11971 {
11972 {0, 0},
11973 {1, 16},
11974 {2, 16},
11975 {3, 32},
11976 {3, 16},
11977 {4, 32},
11978 {4, 16},
11979 {8, 32},
11980 {8, 16}
11981 };
11982 int ver;
11983 int regs;
11984 int newval;
11985
11986 /* If the output has no requirement about FP hardware,
11987 follow the requirement of the input. */
11988 if (out_attr[i].i == 0)
11989 {
11990 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11991 out_attr[i].i = in_attr[i].i;
11992 out_attr[Tag_ABI_HardFP_use].i
11993 = in_attr[Tag_ABI_HardFP_use].i;
11994 break;
11995 }
11996 /* If the input has no requirement about FP hardware, do
11997 nothing. */
11998 else if (in_attr[i].i == 0)
11999 {
12000 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
12001 break;
12002 }
12003
12004 /* Both the input and the output have nonzero Tag_FP_arch.
12005 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
12006
12007 /* If both the input and the output have zero Tag_ABI_HardFP_use,
12008 do nothing. */
12009 if (in_attr[Tag_ABI_HardFP_use].i == 0
12010 && out_attr[Tag_ABI_HardFP_use].i == 0)
12011 ;
12012 /* If the input and the output have different Tag_ABI_HardFP_use,
12013 the combination of them is 0 (implied by Tag_FP_arch). */
12014 else if (in_attr[Tag_ABI_HardFP_use].i
12015 != out_attr[Tag_ABI_HardFP_use].i)
12016 out_attr[Tag_ABI_HardFP_use].i = 0;
12017
12018 /* Now we can handle Tag_FP_arch. */
12019
12020 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
12021 pick the biggest. */
12022 if (in_attr[i].i >= VFP_VERSION_COUNT
12023 && in_attr[i].i > out_attr[i].i)
12024 {
12025 out_attr[i] = in_attr[i];
12026 break;
12027 }
12028 /* The output uses the superset of input features
12029 (ISA version) and registers. */
12030 ver = vfp_versions[in_attr[i].i].ver;
12031 if (ver < vfp_versions[out_attr[i].i].ver)
12032 ver = vfp_versions[out_attr[i].i].ver;
12033 regs = vfp_versions[in_attr[i].i].regs;
12034 if (regs < vfp_versions[out_attr[i].i].regs)
12035 regs = vfp_versions[out_attr[i].i].regs;
12036 	    /* This assumes that all possible supersets are also valid
12037 	       options.  */
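	    /* For example, combining entry 4 ({3, 16}) with entry 2 ({2, 16})
	       yields ver 3 and 16 regs, which maps back to entry 4; combining
	       entry 4 with entry 5 ({4, 32}) yields entry 5.  */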
12038 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
12039 {
12040 if (regs == vfp_versions[newval].regs
12041 && ver == vfp_versions[newval].ver)
12042 break;
12043 }
12044 out_attr[i].i = newval;
12045 }
12046 break;
12047 case Tag_PCS_config:
12048 if (out_attr[i].i == 0)
12049 out_attr[i].i = in_attr[i].i;
12050 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
12051 {
12052 /* It's sometimes ok to mix different configs, so this is only
12053 a warning. */
12054 _bfd_error_handler
12055 (_("Warning: %B: Conflicting platform configuration"), ibfd);
12056 }
12057 break;
12058 case Tag_ABI_PCS_R9_use:
12059 if (in_attr[i].i != out_attr[i].i
12060 && out_attr[i].i != AEABI_R9_unused
12061 && in_attr[i].i != AEABI_R9_unused)
12062 {
12063 _bfd_error_handler
12064 (_("error: %B: Conflicting use of R9"), ibfd);
12065 result = FALSE;
12066 }
12067 if (out_attr[i].i == AEABI_R9_unused)
12068 out_attr[i].i = in_attr[i].i;
12069 break;
12070 case Tag_ABI_PCS_RW_data:
12071 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
12072 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
12073 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
12074 {
12075 _bfd_error_handler
12076 (_("error: %B: SB relative addressing conflicts with use of R9"),
12077 ibfd);
12078 result = FALSE;
12079 }
12080 /* Use the smallest value specified. */
12081 if (in_attr[i].i < out_attr[i].i)
12082 out_attr[i].i = in_attr[i].i;
12083 break;
12084 case Tag_ABI_PCS_wchar_t:
12085 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
12086 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
12087 {
12088 _bfd_error_handler
12089 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
12090 ibfd, in_attr[i].i, out_attr[i].i);
12091 }
12092 else if (in_attr[i].i && !out_attr[i].i)
12093 out_attr[i].i = in_attr[i].i;
12094 break;
12095 case Tag_ABI_enum_size:
12096 if (in_attr[i].i != AEABI_enum_unused)
12097 {
12098 if (out_attr[i].i == AEABI_enum_unused
12099 || out_attr[i].i == AEABI_enum_forced_wide)
12100 {
12101 /* The existing object is compatible with anything.
12102 Use whatever requirements the new object has. */
12103 out_attr[i].i = in_attr[i].i;
12104 }
12105 else if (in_attr[i].i != AEABI_enum_forced_wide
12106 && out_attr[i].i != in_attr[i].i
12107 && !elf_arm_tdata (obfd)->no_enum_size_warning)
12108 {
12109 static const char *aeabi_enum_names[] =
12110 { "", "variable-size", "32-bit", "" };
12111 const char *in_name =
12112 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12113 ? aeabi_enum_names[in_attr[i].i]
12114 : "<unknown>";
12115 const char *out_name =
12116 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
12117 ? aeabi_enum_names[out_attr[i].i]
12118 : "<unknown>";
12119 _bfd_error_handler
12120 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
12121 ibfd, in_name, out_name);
12122 }
12123 }
12124 break;
12125 case Tag_ABI_VFP_args:
12126 	  /* Already done.  */
12127 break;
12128 case Tag_ABI_WMMX_args:
12129 if (in_attr[i].i != out_attr[i].i)
12130 {
12131 _bfd_error_handler
12132 (_("error: %B uses iWMMXt register arguments, %B does not"),
12133 ibfd, obfd);
12134 result = FALSE;
12135 }
12136 break;
12137 case Tag_compatibility:
12138 /* Merged in target-independent code. */
12139 break;
12140 case Tag_ABI_HardFP_use:
12141 /* This is handled along with Tag_FP_arch. */
12142 break;
12143 case Tag_ABI_FP_16bit_format:
12144 if (in_attr[i].i != 0 && out_attr[i].i != 0)
12145 {
12146 if (in_attr[i].i != out_attr[i].i)
12147 {
12148 _bfd_error_handler
12149 (_("error: fp16 format mismatch between %B and %B"),
12150 ibfd, obfd);
12151 result = FALSE;
12152 }
12153 }
12154 if (in_attr[i].i != 0)
12155 out_attr[i].i = in_attr[i].i;
12156 break;
12157
12158 case Tag_DIV_use:
12159 /* A value of zero on input means that the divide instruction may
12160 be used if available in the base architecture as specified via
12161 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
12162 the user did not want divide instructions. A value of 2
12163 explicitly means that divide instructions were allowed in ARM
12164 and Thumb state. */
12165 if (in_attr[i].i == out_attr[i].i)
12166 /* Do nothing. */ ;
12167 else if (elf32_arm_attributes_forbid_div (in_attr)
12168 && !elf32_arm_attributes_accept_div (out_attr))
12169 out_attr[i].i = 1;
12170 else if (elf32_arm_attributes_forbid_div (out_attr)
12171 && elf32_arm_attributes_accept_div (in_attr))
12172 out_attr[i].i = in_attr[i].i;
12173 else if (in_attr[i].i == 2)
12174 out_attr[i].i = in_attr[i].i;
12175 break;
12176
12177 case Tag_MPextension_use_legacy:
12178 /* We don't output objects with Tag_MPextension_use_legacy - we
12179 move the value to Tag_MPextension_use. */
12180 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
12181 {
12182 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
12183 {
12184 _bfd_error_handler
12185 (_("%B has has both the current and legacy "
12186 "Tag_MPextension_use attributes"),
12187 ibfd);
12188 result = FALSE;
12189 }
12190 }
12191
12192 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
12193 out_attr[Tag_MPextension_use] = in_attr[i];
12194
12195 break;
12196
12197 case Tag_nodefaults:
12198 /* This tag is set if it exists, but the value is unused (and is
12199 typically zero). We don't actually need to do anything here -
12200 the merge happens automatically when the type flags are merged
12201 below. */
12202 break;
12203 case Tag_also_compatible_with:
12204 /* Already done in Tag_CPU_arch. */
12205 break;
12206 case Tag_conformance:
12207 /* Keep the attribute if it matches. Throw it away otherwise.
12208 No attribute means no claim to conform. */
12209 if (!in_attr[i].s || !out_attr[i].s
12210 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
12211 out_attr[i].s = NULL;
12212 break;
12213
12214 default:
12215 result
12216 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
12217 }
12218
12219 /* If out_attr was copied from in_attr then it won't have a type yet. */
12220 if (in_attr[i].type && !out_attr[i].type)
12221 out_attr[i].type = in_attr[i].type;
12222 }
12223
12224 /* Merge Tag_compatibility attributes and any common GNU ones. */
12225 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
12226 return FALSE;
12227
12228 /* Check for any attributes not known on ARM. */
12229 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
12230
12231 return result;
12232 }
12233
12234
12235 /* Return TRUE if the two EABI versions are incompatible. */
12236
12237 static bfd_boolean
12238 elf32_arm_versions_compatible (unsigned iver, unsigned over)
12239 {
12240 /* v4 and v5 are the same spec before and after it was released,
12241 so allow mixing them. */
12242 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
12243 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
12244 return TRUE;
12245
12246 return (iver == over);
12247 }
12248
12249 /* Merge backend specific data from an object file to the output
12250 object file when linking. */
12251
12252 static bfd_boolean
12253 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
12254
12255 /* Display the flags field. */
12256
12257 static bfd_boolean
12258 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
12259 {
12260 FILE * file = (FILE *) ptr;
12261 unsigned long flags;
12262
12263 BFD_ASSERT (abfd != NULL && ptr != NULL);
12264
12265 /* Print normal ELF private data. */
12266 _bfd_elf_print_private_bfd_data (abfd, ptr);
12267
12268 flags = elf_elfheader (abfd)->e_flags;
12269 /* Ignore init flag - it may not be set, despite the flags field
12270 containing valid data. */
12271
12272 /* xgettext:c-format */
12273 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12274
12275 switch (EF_ARM_EABI_VERSION (flags))
12276 {
12277 case EF_ARM_EABI_UNKNOWN:
12278 /* The following flag bits are GNU extensions and not part of the
12279 official ARM ELF extended ABI. Hence they are only decoded if
12280 the EABI version is not set. */
12281 if (flags & EF_ARM_INTERWORK)
12282 fprintf (file, _(" [interworking enabled]"));
12283
12284 if (flags & EF_ARM_APCS_26)
12285 fprintf (file, " [APCS-26]");
12286 else
12287 fprintf (file, " [APCS-32]");
12288
12289 if (flags & EF_ARM_VFP_FLOAT)
12290 fprintf (file, _(" [VFP float format]"));
12291 else if (flags & EF_ARM_MAVERICK_FLOAT)
12292 fprintf (file, _(" [Maverick float format]"));
12293 else
12294 fprintf (file, _(" [FPA float format]"));
12295
12296 if (flags & EF_ARM_APCS_FLOAT)
12297 fprintf (file, _(" [floats passed in float registers]"));
12298
12299 if (flags & EF_ARM_PIC)
12300 fprintf (file, _(" [position independent]"));
12301
12302 if (flags & EF_ARM_NEW_ABI)
12303 fprintf (file, _(" [new ABI]"));
12304
12305 if (flags & EF_ARM_OLD_ABI)
12306 fprintf (file, _(" [old ABI]"));
12307
12308 if (flags & EF_ARM_SOFT_FLOAT)
12309 fprintf (file, _(" [software FP]"));
12310
12311 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12312 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12313 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12314 | EF_ARM_MAVERICK_FLOAT);
12315 break;
12316
12317 case EF_ARM_EABI_VER1:
12318 fprintf (file, _(" [Version1 EABI]"));
12319
12320 if (flags & EF_ARM_SYMSARESORTED)
12321 fprintf (file, _(" [sorted symbol table]"));
12322 else
12323 fprintf (file, _(" [unsorted symbol table]"));
12324
12325 flags &= ~ EF_ARM_SYMSARESORTED;
12326 break;
12327
12328 case EF_ARM_EABI_VER2:
12329 fprintf (file, _(" [Version2 EABI]"));
12330
12331 if (flags & EF_ARM_SYMSARESORTED)
12332 fprintf (file, _(" [sorted symbol table]"));
12333 else
12334 fprintf (file, _(" [unsorted symbol table]"));
12335
12336 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12337 fprintf (file, _(" [dynamic symbols use segment index]"));
12338
12339 if (flags & EF_ARM_MAPSYMSFIRST)
12340 fprintf (file, _(" [mapping symbols precede others]"));
12341
12342 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12343 | EF_ARM_MAPSYMSFIRST);
12344 break;
12345
12346 case EF_ARM_EABI_VER3:
12347 fprintf (file, _(" [Version3 EABI]"));
12348 break;
12349
12350 case EF_ARM_EABI_VER4:
12351 fprintf (file, _(" [Version4 EABI]"));
12352 goto eabi;
12353
12354 case EF_ARM_EABI_VER5:
12355 fprintf (file, _(" [Version5 EABI]"));
12356
12357 if (flags & EF_ARM_ABI_FLOAT_SOFT)
12358 fprintf (file, _(" [soft-float ABI]"));
12359
12360 if (flags & EF_ARM_ABI_FLOAT_HARD)
12361 fprintf (file, _(" [hard-float ABI]"));
12362
12363 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
12364
12365 eabi:
12366 if (flags & EF_ARM_BE8)
12367 fprintf (file, _(" [BE8]"));
12368
12369 if (flags & EF_ARM_LE8)
12370 fprintf (file, _(" [LE8]"));
12371
12372 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12373 break;
12374
12375 default:
12376 fprintf (file, _(" <EABI version unrecognised>"));
12377 break;
12378 }
12379
12380 flags &= ~ EF_ARM_EABIMASK;
12381
12382 if (flags & EF_ARM_RELEXEC)
12383 fprintf (file, _(" [relocatable executable]"));
12384
12385 flags &= ~EF_ARM_RELEXEC;
12386
12387 if (flags)
12388 fprintf (file, _("<Unrecognised flag bits set>"));
12389
12390 fputc ('\n', file);
12391
12392 return TRUE;
12393 }
12394
12395 static int
12396 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12397 {
12398 switch (ELF_ST_TYPE (elf_sym->st_info))
12399 {
12400 case STT_ARM_TFUNC:
12401 return ELF_ST_TYPE (elf_sym->st_info);
12402
12403 case STT_ARM_16BIT:
12404 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12405 This allows us to distinguish between data used by Thumb instructions
12406 and non-data (which is probably code) inside Thumb regions of an
12407 executable. */
12408 if (type != STT_OBJECT && type != STT_TLS)
12409 return ELF_ST_TYPE (elf_sym->st_info);
12410 break;
12411
12412 default:
12413 break;
12414 }
12415
12416 return type;
12417 }
12418
12419 static asection *
12420 elf32_arm_gc_mark_hook (asection *sec,
12421 struct bfd_link_info *info,
12422 Elf_Internal_Rela *rel,
12423 struct elf_link_hash_entry *h,
12424 Elf_Internal_Sym *sym)
12425 {
12426 if (h != NULL)
12427 switch (ELF32_R_TYPE (rel->r_info))
12428 {
12429 case R_ARM_GNU_VTINHERIT:
12430 case R_ARM_GNU_VTENTRY:
12431 return NULL;
12432 }
12433
12434 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12435 }
12436
12437 /* Update the got entry reference counts for the section being removed. */
12438
12439 static bfd_boolean
12440 elf32_arm_gc_sweep_hook (bfd * abfd,
12441 struct bfd_link_info * info,
12442 asection * sec,
12443 const Elf_Internal_Rela * relocs)
12444 {
12445 Elf_Internal_Shdr *symtab_hdr;
12446 struct elf_link_hash_entry **sym_hashes;
12447 bfd_signed_vma *local_got_refcounts;
12448 const Elf_Internal_Rela *rel, *relend;
12449 struct elf32_arm_link_hash_table * globals;
12450
12451 if (info->relocatable)
12452 return TRUE;
12453
12454 globals = elf32_arm_hash_table (info);
12455 if (globals == NULL)
12456 return FALSE;
12457
12458 elf_section_data (sec)->local_dynrel = NULL;
12459
12460 symtab_hdr = & elf_symtab_hdr (abfd);
12461 sym_hashes = elf_sym_hashes (abfd);
12462 local_got_refcounts = elf_local_got_refcounts (abfd);
12463
12464 check_use_blx (globals);
12465
12466 relend = relocs + sec->reloc_count;
12467 for (rel = relocs; rel < relend; rel++)
12468 {
12469 unsigned long r_symndx;
12470 struct elf_link_hash_entry *h = NULL;
12471 struct elf32_arm_link_hash_entry *eh;
12472 int r_type;
12473 bfd_boolean call_reloc_p;
12474 bfd_boolean may_become_dynamic_p;
12475 bfd_boolean may_need_local_target_p;
12476 union gotplt_union *root_plt;
12477 struct arm_plt_info *arm_plt;
12478
12479 r_symndx = ELF32_R_SYM (rel->r_info);
12480 if (r_symndx >= symtab_hdr->sh_info)
12481 {
12482 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12483 while (h->root.type == bfd_link_hash_indirect
12484 || h->root.type == bfd_link_hash_warning)
12485 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12486 }
12487 eh = (struct elf32_arm_link_hash_entry *) h;
12488
12489 call_reloc_p = FALSE;
12490 may_become_dynamic_p = FALSE;
12491 may_need_local_target_p = FALSE;
12492
12493 r_type = ELF32_R_TYPE (rel->r_info);
12494 r_type = arm_real_reloc_type (globals, r_type);
12495 switch (r_type)
12496 {
12497 case R_ARM_GOT32:
12498 case R_ARM_GOT_PREL:
12499 case R_ARM_TLS_GD32:
12500 case R_ARM_TLS_IE32:
12501 if (h != NULL)
12502 {
12503 if (h->got.refcount > 0)
12504 h->got.refcount -= 1;
12505 }
12506 else if (local_got_refcounts != NULL)
12507 {
12508 if (local_got_refcounts[r_symndx] > 0)
12509 local_got_refcounts[r_symndx] -= 1;
12510 }
12511 break;
12512
12513 case R_ARM_TLS_LDM32:
12514 globals->tls_ldm_got.refcount -= 1;
12515 break;
12516
12517 case R_ARM_PC24:
12518 case R_ARM_PLT32:
12519 case R_ARM_CALL:
12520 case R_ARM_JUMP24:
12521 case R_ARM_PREL31:
12522 case R_ARM_THM_CALL:
12523 case R_ARM_THM_JUMP24:
12524 case R_ARM_THM_JUMP19:
12525 call_reloc_p = TRUE;
12526 may_need_local_target_p = TRUE;
12527 break;
12528
12529 case R_ARM_ABS12:
12530 if (!globals->vxworks_p)
12531 {
12532 may_need_local_target_p = TRUE;
12533 break;
12534 }
12535 /* Fall through. */
12536 case R_ARM_ABS32:
12537 case R_ARM_ABS32_NOI:
12538 case R_ARM_REL32:
12539 case R_ARM_REL32_NOI:
12540 case R_ARM_MOVW_ABS_NC:
12541 case R_ARM_MOVT_ABS:
12542 case R_ARM_MOVW_PREL_NC:
12543 case R_ARM_MOVT_PREL:
12544 case R_ARM_THM_MOVW_ABS_NC:
12545 case R_ARM_THM_MOVT_ABS:
12546 case R_ARM_THM_MOVW_PREL_NC:
12547 case R_ARM_THM_MOVT_PREL:
12548 /* Should the interworking branches be here also? */
12549 if ((info->shared || globals->root.is_relocatable_executable)
12550 && (sec->flags & SEC_ALLOC) != 0)
12551 {
12552 if (h == NULL
12553 && elf32_arm_howto_from_type (r_type)->pc_relative)
12554 {
12555 call_reloc_p = TRUE;
12556 may_need_local_target_p = TRUE;
12557 }
12558 else
12559 may_become_dynamic_p = TRUE;
12560 }
12561 else
12562 may_need_local_target_p = TRUE;
12563 break;
12564
12565 default:
12566 break;
12567 }
12568
12569 if (may_need_local_target_p
12570 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12571 {
12572 /* If PLT refcount book-keeping is wrong and too low, we'll
12573 see a zero value (going to -1) for the root PLT reference
12574 count. */
12575 if (root_plt->refcount >= 0)
12576 {
12577 BFD_ASSERT (root_plt->refcount != 0);
12578 root_plt->refcount -= 1;
12579 }
12580 else
12581 	    /* A value of -1 means the symbol has become local, either
12582 	       because it was forced local or because a hidden definition
12583 	       was seen.  Any other negative value is an error.  */
12584 BFD_ASSERT (root_plt->refcount == -1);
12585
12586 if (!call_reloc_p)
12587 arm_plt->noncall_refcount--;
12588
12589 if (r_type == R_ARM_THM_CALL)
12590 arm_plt->maybe_thumb_refcount--;
12591
12592 if (r_type == R_ARM_THM_JUMP24
12593 || r_type == R_ARM_THM_JUMP19)
12594 arm_plt->thumb_refcount--;
12595 }
12596
12597 if (may_become_dynamic_p)
12598 {
12599 struct elf_dyn_relocs **pp;
12600 struct elf_dyn_relocs *p;
12601
12602 if (h != NULL)
12603 pp = &(eh->dyn_relocs);
12604 else
12605 {
12606 Elf_Internal_Sym *isym;
12607
12608 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12609 abfd, r_symndx);
12610 if (isym == NULL)
12611 return FALSE;
12612 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12613 if (pp == NULL)
12614 return FALSE;
12615 }
12616 for (; (p = *pp) != NULL; pp = &p->next)
12617 if (p->sec == sec)
12618 {
12619 /* Everything must go for SEC. */
12620 *pp = p->next;
12621 break;
12622 }
12623 }
12624 }
12625
12626 return TRUE;
12627 }
12628
12629 /* Look through the relocs for a section during the first phase. */
12630
12631 static bfd_boolean
12632 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12633 asection *sec, const Elf_Internal_Rela *relocs)
12634 {
12635 Elf_Internal_Shdr *symtab_hdr;
12636 struct elf_link_hash_entry **sym_hashes;
12637 const Elf_Internal_Rela *rel;
12638 const Elf_Internal_Rela *rel_end;
12639 bfd *dynobj;
12640 asection *sreloc;
12641 struct elf32_arm_link_hash_table *htab;
12642 bfd_boolean call_reloc_p;
12643 bfd_boolean may_become_dynamic_p;
12644 bfd_boolean may_need_local_target_p;
12645 unsigned long nsyms;
12646
12647 if (info->relocatable)
12648 return TRUE;
12649
12650 BFD_ASSERT (is_arm_elf (abfd));
12651
12652 htab = elf32_arm_hash_table (info);
12653 if (htab == NULL)
12654 return FALSE;
12655
12656 sreloc = NULL;
12657
12658 /* Create dynamic sections for relocatable executables so that we can
12659 copy relocations. */
12660 if (htab->root.is_relocatable_executable
12661 && ! htab->root.dynamic_sections_created)
12662 {
12663 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12664 return FALSE;
12665 }
12666
12667 if (htab->root.dynobj == NULL)
12668 htab->root.dynobj = abfd;
12669 if (!create_ifunc_sections (info))
12670 return FALSE;
12671
12672 dynobj = htab->root.dynobj;
12673
12674 symtab_hdr = & elf_symtab_hdr (abfd);
12675 sym_hashes = elf_sym_hashes (abfd);
12676 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
12677
12678 rel_end = relocs + sec->reloc_count;
12679 for (rel = relocs; rel < rel_end; rel++)
12680 {
12681 Elf_Internal_Sym *isym;
12682 struct elf_link_hash_entry *h;
12683 struct elf32_arm_link_hash_entry *eh;
12684 unsigned long r_symndx;
12685 int r_type;
12686
12687 r_symndx = ELF32_R_SYM (rel->r_info);
12688 r_type = ELF32_R_TYPE (rel->r_info);
12689 r_type = arm_real_reloc_type (htab, r_type);
12690
12691 if (r_symndx >= nsyms
12692 /* PR 9934: It is possible to have relocations that do not
12693 refer to symbols, thus it is also possible to have an
12694 object file containing relocations but no symbol table. */
12695 && (r_symndx > STN_UNDEF || nsyms > 0))
12696 {
12697 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
12698 r_symndx);
12699 return FALSE;
12700 }
12701
12702 h = NULL;
12703 isym = NULL;
12704 if (nsyms > 0)
12705 {
12706 if (r_symndx < symtab_hdr->sh_info)
12707 {
12708 /* A local symbol. */
12709 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12710 abfd, r_symndx);
12711 if (isym == NULL)
12712 return FALSE;
12713 }
12714 else
12715 {
12716 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12717 while (h->root.type == bfd_link_hash_indirect
12718 || h->root.type == bfd_link_hash_warning)
12719 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12720
12721 /* PR15323, ref flags aren't set for references in the
12722 same object. */
12723 h->root.non_ir_ref = 1;
12724 }
12725 }
12726
12727 eh = (struct elf32_arm_link_hash_entry *) h;
12728
12729 call_reloc_p = FALSE;
12730 may_become_dynamic_p = FALSE;
12731 may_need_local_target_p = FALSE;
12732
12733 /* Could be done earlier, if h were already available. */
12734 r_type = elf32_arm_tls_transition (info, r_type, h);
12735 switch (r_type)
12736 {
12737 case R_ARM_GOT32:
12738 case R_ARM_GOT_PREL:
12739 case R_ARM_TLS_GD32:
12740 case R_ARM_TLS_IE32:
12741 case R_ARM_TLS_GOTDESC:
12742 case R_ARM_TLS_DESCSEQ:
12743 case R_ARM_THM_TLS_DESCSEQ:
12744 case R_ARM_TLS_CALL:
12745 case R_ARM_THM_TLS_CALL:
12746 /* This symbol requires a global offset table entry. */
12747 {
12748 int tls_type, old_tls_type;
12749
12750 switch (r_type)
12751 {
12752 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12753
12754 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12755
12756 case R_ARM_TLS_GOTDESC:
12757 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12758 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12759 tls_type = GOT_TLS_GDESC; break;
12760
12761 default: tls_type = GOT_NORMAL; break;
12762 }
12763
12764 if (!info->executable && (tls_type & GOT_TLS_IE))
12765 info->flags |= DF_STATIC_TLS;
12766
12767 if (h != NULL)
12768 {
12769 h->got.refcount++;
12770 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12771 }
12772 else
12773 {
12774 /* This is a global offset table entry for a local symbol. */
12775 if (!elf32_arm_allocate_local_sym_info (abfd))
12776 return FALSE;
12777 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12778 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12779 }
12780
12781 /* If a variable is accessed with both tls methods, two
12782 slots may be created. */
12783 if (GOT_TLS_GD_ANY_P (old_tls_type)
12784 && GOT_TLS_GD_ANY_P (tls_type))
12785 tls_type |= old_tls_type;
12786
12787 /* We will already have issued an error message if there
12788 is a TLS/non-TLS mismatch, based on the symbol
12789 type. So just combine any TLS types needed. */
12790 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12791 && tls_type != GOT_NORMAL)
12792 tls_type |= old_tls_type;
12793
12794 	    /* If the symbol is accessed via both the IE and GDESC
12795 	       methods, we can relax to IE.  Turn off the GDESC flag
12796 	       without disturbing any other TLS types that may be
12797 	       involved.  */
12798 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12799 tls_type &= ~GOT_TLS_GDESC;
12800
12801 if (old_tls_type != tls_type)
12802 {
12803 if (h != NULL)
12804 elf32_arm_hash_entry (h)->tls_type = tls_type;
12805 else
12806 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12807 }
12808 }
12809 /* Fall through. */
12810
12811 case R_ARM_TLS_LDM32:
12812 if (r_type == R_ARM_TLS_LDM32)
12813 htab->tls_ldm_got.refcount++;
12814 /* Fall through. */
12815
12816 case R_ARM_GOTOFF32:
12817 case R_ARM_GOTPC:
12818 if (htab->root.sgot == NULL
12819 && !create_got_section (htab->root.dynobj, info))
12820 return FALSE;
12821 break;
12822
12823 case R_ARM_PC24:
12824 case R_ARM_PLT32:
12825 case R_ARM_CALL:
12826 case R_ARM_JUMP24:
12827 case R_ARM_PREL31:
12828 case R_ARM_THM_CALL:
12829 case R_ARM_THM_JUMP24:
12830 case R_ARM_THM_JUMP19:
12831 call_reloc_p = TRUE;
12832 may_need_local_target_p = TRUE;
12833 break;
12834
12835 case R_ARM_ABS12:
12836 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12837 ldr __GOTT_INDEX__ offsets. */
12838 if (!htab->vxworks_p)
12839 {
12840 may_need_local_target_p = TRUE;
12841 break;
12842 }
12843 /* Fall through. */
12844
12845 case R_ARM_MOVW_ABS_NC:
12846 case R_ARM_MOVT_ABS:
12847 case R_ARM_THM_MOVW_ABS_NC:
12848 case R_ARM_THM_MOVT_ABS:
12849 if (info->shared)
12850 {
12851 (*_bfd_error_handler)
12852 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12853 abfd, elf32_arm_howto_table_1[r_type].name,
12854 (h) ? h->root.root.string : "a local symbol");
12855 bfd_set_error (bfd_error_bad_value);
12856 return FALSE;
12857 }
12858
12859 /* Fall through. */
12860 case R_ARM_ABS32:
12861 case R_ARM_ABS32_NOI:
12862 if (h != NULL && info->executable)
12863 {
12864 h->pointer_equality_needed = 1;
12865 }
12866 /* Fall through. */
12867 case R_ARM_REL32:
12868 case R_ARM_REL32_NOI:
12869 case R_ARM_MOVW_PREL_NC:
12870 case R_ARM_MOVT_PREL:
12871 case R_ARM_THM_MOVW_PREL_NC:
12872 case R_ARM_THM_MOVT_PREL:
12873
12874 /* Should the interworking branches be listed here? */
12875 if ((info->shared || htab->root.is_relocatable_executable)
12876 && (sec->flags & SEC_ALLOC) != 0)
12877 {
12878 if (h == NULL
12879 && elf32_arm_howto_from_type (r_type)->pc_relative)
12880 {
12881 /* In shared libraries and relocatable executables,
12882 we treat local relative references as calls;
12883 see the related SYMBOL_CALLS_LOCAL code in
12884 allocate_dynrelocs. */
12885 call_reloc_p = TRUE;
12886 may_need_local_target_p = TRUE;
12887 }
12888 else
12889 /* We are creating a shared library or relocatable
12890 executable, and this is a reloc against a global symbol,
12891 or a non-PC-relative reloc against a local symbol.
12892 We may need to copy the reloc into the output. */
12893 may_become_dynamic_p = TRUE;
12894 }
12895 else
12896 may_need_local_target_p = TRUE;
12897 break;
12898
12899 /* This relocation describes the C++ object vtable hierarchy.
12900 Reconstruct it for later use during GC. */
12901 case R_ARM_GNU_VTINHERIT:
12902 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12903 return FALSE;
12904 break;
12905
12906 /* This relocation describes which C++ vtable entries are actually
12907 used. Record for later use during GC. */
12908 case R_ARM_GNU_VTENTRY:
12909 BFD_ASSERT (h != NULL);
12910 if (h != NULL
12911 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12912 return FALSE;
12913 break;
12914 }
12915
12916 if (h != NULL)
12917 {
12918 if (call_reloc_p)
12919 /* We may need a .plt entry if the function this reloc
12920 refers to is in a different object, regardless of the
12921 symbol's type. We can't tell for sure yet, because
12922 something later might force the symbol local. */
12923 h->needs_plt = 1;
12924 else if (may_need_local_target_p)
12925 /* If this reloc is in a read-only section, we might
12926 need a copy reloc. We can't check reliably at this
12927 stage whether the section is read-only, as input
12928 sections have not yet been mapped to output sections.
12929 Tentatively set the flag for now, and correct in
12930 adjust_dynamic_symbol. */
12931 h->non_got_ref = 1;
12932 }
12933
12934 if (may_need_local_target_p
12935 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12936 {
12937 union gotplt_union *root_plt;
12938 struct arm_plt_info *arm_plt;
12939 struct arm_local_iplt_info *local_iplt;
12940
12941 if (h != NULL)
12942 {
12943 root_plt = &h->plt;
12944 arm_plt = &eh->plt;
12945 }
12946 else
12947 {
12948 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12949 if (local_iplt == NULL)
12950 return FALSE;
12951 root_plt = &local_iplt->root;
12952 arm_plt = &local_iplt->arm;
12953 }
12954
12955 /* If the symbol is a function that doesn't bind locally,
12956 this relocation will need a PLT entry. */
12957 if (root_plt->refcount != -1)
12958 root_plt->refcount += 1;
12959
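/* A non-call reference means the symbol's address is taken; for ifuncs this can make the (i)plt entry the symbol's canonical address (see elf32_arm_finish_dynamic_symbol). */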
12960 if (!call_reloc_p)
12961 arm_plt->noncall_refcount++;
12962
12963 /* It's too early to use htab->use_blx here, so we have to
12964 record possible blx references separately from
12965 relocs that definitely need a thumb stub. */
12966
12967 if (r_type == R_ARM_THM_CALL)
12968 arm_plt->maybe_thumb_refcount += 1;
12969
12970 if (r_type == R_ARM_THM_JUMP24
12971 || r_type == R_ARM_THM_JUMP19)
12972 arm_plt->thumb_refcount += 1;
12973 }
12974
12975 if (may_become_dynamic_p)
12976 {
12977 struct elf_dyn_relocs *p, **head;
12978
12979 /* Create a reloc section in dynobj. */
12980 if (sreloc == NULL)
12981 {
12982 sreloc = _bfd_elf_make_dynamic_reloc_section
12983 (sec, dynobj, 2, abfd, ! htab->use_rel);
12984
12985 if (sreloc == NULL)
12986 return FALSE;
12987
12988 /* BPABI objects never have dynamic relocations mapped. */
12989 if (htab->symbian_p)
12990 {
12991 flagword flags;
12992
12993 flags = bfd_get_section_flags (dynobj, sreloc);
12994 flags &= ~(SEC_LOAD | SEC_ALLOC);
12995 bfd_set_section_flags (dynobj, sreloc, flags);
12996 }
12997 }
12998
12999 /* If this is a global symbol, count the number of
13000 relocations we need for this symbol. */
13001 if (h != NULL)
13002 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
13003 else
13004 {
13005 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
13006 if (head == NULL)
13007 return FALSE;
13008 }
13009
13010 p = *head;
13011 if (p == NULL || p->sec != sec)
13012 {
13013 bfd_size_type amt = sizeof *p;
13014
13015 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
13016 if (p == NULL)
13017 return FALSE;
13018 p->next = *head;
13019 *head = p;
13020 p->sec = sec;
13021 p->count = 0;
13022 p->pc_count = 0;
13023 }
13024
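/* pc_count lets allocate_dynrelocs_for_symbol later discard PC-relative relocs that turn out to bind locally. */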
13025 if (elf32_arm_howto_from_type (r_type)->pc_relative)
13026 p->pc_count += 1;
13027 p->count += 1;
13028 }
13029 }
13030
13031 return TRUE;
13032 }
13033
13034 /* Unwinding tables are not referenced directly. This pass marks them as
13035 required if the corresponding code section is marked. */
13036
13037 static bfd_boolean
13038 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
13039 elf_gc_mark_hook_fn gc_mark_hook)
13040 {
13041 bfd *sub;
13042 Elf_Internal_Shdr **elf_shdrp;
13043 bfd_boolean again;
13044
13045 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
13046
13047 /* Marking EH data may cause additional code sections to be marked,
13048 requiring multiple passes. */
13049 again = TRUE;
13050 while (again)
13051 {
13052 again = FALSE;
13053 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
13054 {
13055 asection *o;
13056
13057 if (! is_arm_elf (sub))
13058 continue;
13059
13060 elf_shdrp = elf_elfsections (sub);
13061 for (o = sub->sections; o != NULL; o = o->next)
13062 {
13063 Elf_Internal_Shdr *hdr;
13064
13065 hdr = &elf_section_data (o)->this_hdr;
13066 if (hdr->sh_type == SHT_ARM_EXIDX
13067 && hdr->sh_link
13068 && hdr->sh_link < elf_numsections (sub)
13069 && !o->gc_mark
13070 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
13071 {
13072 again = TRUE;
13073 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
13074 return FALSE;
13075 }
13076 }
13077 }
13078 }
13079
13080 return TRUE;
13081 }
13082
13083 /* Treat mapping symbols as special target symbols. */
13084
13085 static bfd_boolean
13086 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
13087 {
13088 return bfd_is_arm_special_symbol_name (sym->name,
13089 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
13090 }
13091
13092 /* This is a copy of elf_find_function() from elf.c except that
13093 ARM mapping symbols are ignored when looking for function names
13094 and STT_ARM_TFUNC is considered to be a function type. */
13095
13096 static bfd_boolean
13097 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
13098 asymbol ** symbols,
13099 asection * section,
13100 bfd_vma offset,
13101 const char ** filename_ptr,
13102 const char ** functionname_ptr)
13103 {
13104 const char * filename = NULL;
13105 asymbol * func = NULL;
13106 bfd_vma low_func = 0;
13107 asymbol ** p;
13108
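/* Scan all symbols, remembering the function-type symbol with the highest value at or below OFFSET in SECTION. */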
13109 for (p = symbols; *p != NULL; p++)
13110 {
13111 elf_symbol_type *q;
13112
13113 q = (elf_symbol_type *) *p;
13114
13115 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
13116 {
13117 default:
13118 break;
13119 case STT_FILE:
13120 filename = bfd_asymbol_name (&q->symbol);
13121 break;
13122 case STT_FUNC:
13123 case STT_ARM_TFUNC:
13124 case STT_NOTYPE:
13125 /* Skip mapping symbols. */
13126 if ((q->symbol.flags & BSF_LOCAL)
13127 && bfd_is_arm_special_symbol_name (q->symbol.name,
13128 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
13129 continue;
13130 /* Not a mapping symbol: see whether it is the closest function so far. */
13131 if (bfd_get_section (&q->symbol) == section
13132 && q->symbol.value >= low_func
13133 && q->symbol.value <= offset)
13134 {
13135 func = (asymbol *) q;
13136 low_func = q->symbol.value;
13137 }
13138 break;
13139 }
13140 }
13141
13142 if (func == NULL)
13143 return FALSE;
13144
13145 if (filename_ptr)
13146 *filename_ptr = filename;
13147 if (functionname_ptr)
13148 *functionname_ptr = bfd_asymbol_name (func);
13149
13150 return TRUE;
13151 }
13152
13153
13154 /* Find the nearest line to a particular section and offset, for error
13155 reporting. This code is a duplicate of the code in elf.c, except
13156 that it uses arm_elf_find_function. */
13157
13158 static bfd_boolean
13159 elf32_arm_find_nearest_line (bfd * abfd,
13160 asymbol ** symbols,
13161 asection * section,
13162 bfd_vma offset,
13163 const char ** filename_ptr,
13164 const char ** functionname_ptr,
13165 unsigned int * line_ptr,
13166 unsigned int * discriminator_ptr)
13167 {
13168 bfd_boolean found = FALSE;
13169
13170 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
13171 filename_ptr, functionname_ptr,
13172 line_ptr, discriminator_ptr,
13173 dwarf_debug_sections, 0,
13174 & elf_tdata (abfd)->dwarf2_find_line_info))
13175 {
13176 if (!*functionname_ptr)
13177 arm_elf_find_function (abfd, symbols, section, offset,
13178 *filename_ptr ? NULL : filename_ptr,
13179 functionname_ptr);
13180
13181 return TRUE;
13182 }
13183
13184 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
13185 uses DWARF1. */
13186
13187 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
13188 & found, filename_ptr,
13189 functionname_ptr, line_ptr,
13190 & elf_tdata (abfd)->line_info))
13191 return FALSE;
13192
13193 if (found && (*functionname_ptr || *line_ptr))
13194 return TRUE;
13195
13196 if (symbols == NULL)
13197 return FALSE;
13198
13199 if (! arm_elf_find_function (abfd, symbols, section, offset,
13200 filename_ptr, functionname_ptr))
13201 return FALSE;
13202
13203 *line_ptr = 0;
13204 return TRUE;
13205 }
13206
13207 static bfd_boolean
13208 elf32_arm_find_inliner_info (bfd * abfd,
13209 const char ** filename_ptr,
13210 const char ** functionname_ptr,
13211 unsigned int * line_ptr)
13212 {
13213 bfd_boolean found;
13214 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
13215 functionname_ptr, line_ptr,
13216 & elf_tdata (abfd)->dwarf2_find_line_info);
13217 return found;
13218 }
13219
13220 /* Adjust a symbol defined by a dynamic object and referenced by a
13221 regular object. The current definition is in some section of the
13222 dynamic object, but we're not including those sections. We have to
13223 change the definition to something the rest of the link can
13224 understand. */
13225
13226 static bfd_boolean
13227 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
13228 struct elf_link_hash_entry * h)
13229 {
13230 bfd * dynobj;
13231 asection * s;
13232 struct elf32_arm_link_hash_entry * eh;
13233 struct elf32_arm_link_hash_table *globals;
13234
13235 globals = elf32_arm_hash_table (info);
13236 if (globals == NULL)
13237 return FALSE;
13238
13239 dynobj = elf_hash_table (info)->dynobj;
13240
13241 /* Make sure we know what is going on here. */
13242 BFD_ASSERT (dynobj != NULL
13243 && (h->needs_plt
13244 || h->type == STT_GNU_IFUNC
13245 || h->u.weakdef != NULL
13246 || (h->def_dynamic
13247 && h->ref_regular
13248 && !h->def_regular)));
13249
13250 eh = (struct elf32_arm_link_hash_entry *) h;
13251
13252 /* If this is a function, put it in the procedure linkage table. We
13253 will fill in the contents of the procedure linkage table later,
13254 when we know the address of the .got section. */
13255 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
13256 {
13257 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
13258 symbol binds locally. */
13259 if (h->plt.refcount <= 0
13260 || (h->type != STT_GNU_IFUNC
13261 && (SYMBOL_CALLS_LOCAL (info, h)
13262 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
13263 && h->root.type == bfd_link_hash_undefweak))))
13264 {
13265 /* This case can occur if we saw a PLT32 reloc in an input
13266 file, but the symbol was never referred to by a dynamic
13267 object, or if all references were garbage collected. In
13268 such a case, we don't actually need to build a procedure
13269 linkage table, and we can just do a PC24 reloc instead. */
13270 h->plt.offset = (bfd_vma) -1;
13271 eh->plt.thumb_refcount = 0;
13272 eh->plt.maybe_thumb_refcount = 0;
13273 eh->plt.noncall_refcount = 0;
13274 h->needs_plt = 0;
13275 }
13276
13277 return TRUE;
13278 }
13279 else
13280 {
13281 /* It's possible that we incorrectly decided a .plt reloc was
13282 needed for an R_ARM_PC24 or similar reloc to a non-function sym
13283 in check_relocs. We can't decide accurately between function
13284 and non-function syms in check-relocs; Objects loaded later in
13285 the link may change h->type. So fix it now. */
13286 h->plt.offset = (bfd_vma) -1;
13287 eh->plt.thumb_refcount = 0;
13288 eh->plt.maybe_thumb_refcount = 0;
13289 eh->plt.noncall_refcount = 0;
13290 }
13291
13292 /* If this is a weak symbol, and there is a real definition, the
13293 processor independent code will have arranged for us to see the
13294 real definition first, and we can just use the same value. */
13295 if (h->u.weakdef != NULL)
13296 {
13297 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13298 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13299 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13300 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13301 return TRUE;
13302 }
13303
13304 /* If there are no non-GOT references, we do not need a copy
13305 relocation. */
13306 if (!h->non_got_ref)
13307 return TRUE;
13308
13309 /* This is a reference to a symbol defined by a dynamic object which
13310 is not a function. */
13311
13312 /* If we are creating a shared library, we must presume that the
13313 only references to the symbol are via the global offset table.
13314 For such cases we need not do anything here; the relocations will
13315 be handled correctly by relocate_section. Relocatable executables
13316 can reference data in shared objects directly, so we don't need to
13317 do anything here. */
13318 if (info->shared || globals->root.is_relocatable_executable)
13319 return TRUE;
13320
13321 /* We must allocate the symbol in our .dynbss section, which will
13322 become part of the .bss section of the executable. There will be
13323 an entry for this symbol in the .dynsym section. The dynamic
13324 object will contain position independent code, so all references
13325 from the dynamic object to this symbol will go through the global
13326 offset table. The dynamic linker will use the .dynsym entry to
13327 determine the address it must put in the global offset table, so
13328 both the dynamic object and the regular object will refer to the
13329 same memory location for the variable. */
13330 s = bfd_get_linker_section (dynobj, ".dynbss");
13331 BFD_ASSERT (s != NULL);
13332
13333 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13334 copy the initial value out of the dynamic object and into the
13335 runtime process image. We need to remember the offset into the
13336 .rel(a).bss section we are going to use. */
13337 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13338 {
13339 asection *srel;
13340
13341 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
13342 elf32_arm_allocate_dynrelocs (info, srel, 1);
13343 h->needs_copy = 1;
13344 }
13345
13346 return _bfd_elf_adjust_dynamic_copy (info, h, s);
13347 }
13348
13349 /* Allocate space in .plt, .got and associated reloc sections for
13350 dynamic relocs. */
13351
13352 static bfd_boolean
13353 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13354 {
13355 struct bfd_link_info *info;
13356 struct elf32_arm_link_hash_table *htab;
13357 struct elf32_arm_link_hash_entry *eh;
13358 struct elf_dyn_relocs *p;
13359
13360 if (h->root.type == bfd_link_hash_indirect)
13361 return TRUE;
13362
13363 eh = (struct elf32_arm_link_hash_entry *) h;
13364
13365 info = (struct bfd_link_info *) inf;
13366 htab = elf32_arm_hash_table (info);
13367 if (htab == NULL)
13368 return FALSE;
13369
13370 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13371 && h->plt.refcount > 0)
13372 {
13373 /* Make sure this symbol is output as a dynamic symbol.
13374 Undefined weak syms won't yet be marked as dynamic. */
13375 if (h->dynindx == -1
13376 && !h->forced_local)
13377 {
13378 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13379 return FALSE;
13380 }
13381
13382 /* If the call in the PLT entry binds locally, the associated
13383 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13384 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13385 than the .plt section. */
13386 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13387 {
13388 eh->is_iplt = 1;
13389 if (eh->plt.noncall_refcount == 0
13390 && SYMBOL_REFERENCES_LOCAL (info, h))
13391 /* All non-call references can be resolved directly.
13392 This means that they can (and in some cases, must)
13393 resolve directly to the run-time target, rather than
13394 to the PLT. That in turn means that any .got entry
13395 would be equal to the .igot.plt entry, so there's
13396 no point having both. */
13397 h->got.refcount = 0;
13398 }
13399
13400 if (info->shared
13401 || eh->is_iplt
13402 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13403 {
13404 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13405
13406 /* If this symbol is not defined in a regular file, and we are
13407 not generating a shared library, then set the symbol to this
13408 location in the .plt. This is required to make function
13409 pointers compare as equal between the normal executable and
13410 the shared library. */
13411 if (! info->shared
13412 && !h->def_regular)
13413 {
13414 h->root.u.def.section = htab->root.splt;
13415 h->root.u.def.value = h->plt.offset;
13416
13417 /* Make sure the function is not marked as Thumb, in case
13418 it is the target of an ABS32 relocation, which will
13419 point to the PLT entry. */
13420 h->target_internal = ST_BRANCH_TO_ARM;
13421 }
13422
13423 /* VxWorks executables have a second set of relocations for
13424 each PLT entry. They go in a separate relocation section,
13425 which is processed by the kernel loader. */
13426 if (htab->vxworks_p && !info->shared)
13427 {
13428 /* There is a relocation for the initial PLT entry:
13429 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13430 if (h->plt.offset == htab->plt_header_size)
13431 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13432
13433 /* There are two extra relocations for each subsequent
13434 PLT entry: an R_ARM_32 relocation for the GOT entry,
13435 and an R_ARM_32 relocation for the PLT entry. */
13436 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
13437 }
13438 }
13439 else
13440 {
13441 h->plt.offset = (bfd_vma) -1;
13442 h->needs_plt = 0;
13443 }
13444 }
13445 else
13446 {
13447 h->plt.offset = (bfd_vma) -1;
13448 h->needs_plt = 0;
13449 }
13450
13451 eh = (struct elf32_arm_link_hash_entry *) h;
13452 eh->tlsdesc_got = (bfd_vma) -1;
13453
13454 if (h->got.refcount > 0)
13455 {
13456 asection *s;
13457 bfd_boolean dyn;
13458 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13459 int indx;
13460
13461 /* Make sure this symbol is output as a dynamic symbol.
13462 Undefined weak syms won't yet be marked as dynamic. */
13463 if (h->dynindx == -1
13464 && !h->forced_local)
13465 {
13466 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13467 return FALSE;
13468 }
13469
13470 if (!htab->symbian_p)
13471 {
13472 s = htab->root.sgot;
13473 h->got.offset = s->size;
13474
13475 if (tls_type == GOT_UNKNOWN)
13476 abort ();
13477
13478 if (tls_type == GOT_NORMAL)
13479 /* Non-TLS symbols need one GOT slot. */
13480 s->size += 4;
13481 else
13482 {
13483 if (tls_type & GOT_TLS_GDESC)
13484 {
13485 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13486 eh->tlsdesc_got
13487 = (htab->root.sgotplt->size
13488 - elf32_arm_compute_jump_table_size (htab));
13489 htab->root.sgotplt->size += 8;
13490 h->got.offset = (bfd_vma) -2;
13491 /* plt.got_offset needs to know there's a TLS_DESC
13492 reloc in the middle of .got.plt. */
13493 htab->num_tls_desc++;
13494 }
13495
13496 if (tls_type & GOT_TLS_GD)
13497 {
13498 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13499 the symbol is both GD and GDESC, got.offset may
13500 have been overwritten. */
13501 h->got.offset = s->size;
13502 s->size += 8;
13503 }
13504
13505 if (tls_type & GOT_TLS_IE)
13506 /* R_ARM_TLS_IE32 needs one GOT slot. */
13507 s->size += 4;
13508 }
13509
13510 dyn = htab->root.dynamic_sections_created;
13511
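/* INDX is non-zero when the GOT entry must be relocated against the dynamic symbol itself; it stays zero when the symbol is (or will be) resolved locally. */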
13512 indx = 0;
13513 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13514 && (!info->shared
13515 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13516 indx = h->dynindx;
13517
13518 if (tls_type != GOT_NORMAL
13519 && (info->shared || indx != 0)
13520 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13521 || h->root.type != bfd_link_hash_undefweak))
13522 {
13523 if (tls_type & GOT_TLS_IE)
13524 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13525
13526 if (tls_type & GOT_TLS_GD)
13527 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13528
13529 if (tls_type & GOT_TLS_GDESC)
13530 {
13531 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13532 /* GDESC needs a trampoline to jump to. */
13533 htab->tls_trampoline = -1;
13534 }
13535
13536 /* Only GD needs this extra relocation. GDESC just emits
13537 one relocation per pair of GOT entries. */
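/* (Roughly: against a dynamic symbol, GD needs both a module-index and an offset relocation; a locally bound symbol only needs the module-index one.) */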
13538 if ((tls_type & GOT_TLS_GD) && indx != 0)
13539 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13540 }
13541 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
13542 {
13543 if (htab->root.dynamic_sections_created)
13544 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13545 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13546 }
13547 else if (h->type == STT_GNU_IFUNC
13548 && eh->plt.noncall_refcount == 0)
13549 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13550 they all resolve dynamically instead. Reserve room for the
13551 GOT entry's R_ARM_IRELATIVE relocation. */
13552 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13553 else if (info->shared && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13554 || h->root.type != bfd_link_hash_undefweak))
13555 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13556 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13557 }
13558 }
13559 else
13560 h->got.offset = (bfd_vma) -1;
13561
13562 /* Allocate stubs for exported Thumb functions on v4t. */
13563 if (!htab->use_blx && h->dynindx != -1
13564 && h->def_regular
13565 && h->target_internal == ST_BRANCH_TO_THUMB
13566 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13567 {
13568 struct elf_link_hash_entry * th;
13569 struct bfd_link_hash_entry * bh;
13570 struct elf_link_hash_entry * myh;
13571 char name[1024];
13572 asection *s;
13573 bh = NULL;
13574 /* Create a new symbol to register the real location of the function. */
13575 s = h->root.u.def.section;
13576 sprintf (name, "__real_%s", h->root.root.string);
13577 _bfd_generic_link_add_one_symbol (info, s->owner,
13578 name, BSF_GLOBAL, s,
13579 h->root.u.def.value,
13580 NULL, TRUE, FALSE, &bh);
13581
13582 myh = (struct elf_link_hash_entry *) bh;
13583 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13584 myh->forced_local = 1;
13585 myh->target_internal = ST_BRANCH_TO_THUMB;
13586 eh->export_glue = myh;
13587 th = record_arm_to_thumb_glue (info, h);
13588 /* Point the symbol at the stub. */
13589 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13590 h->target_internal = ST_BRANCH_TO_ARM;
13591 h->root.u.def.section = th->root.u.def.section;
13592 h->root.u.def.value = th->root.u.def.value & ~1;
13593 }
13594
13595 if (eh->dyn_relocs == NULL)
13596 return TRUE;
13597
13598 /* In the shared -Bsymbolic case, discard space allocated for
13599 dynamic pc-relative relocs against symbols which turn out to be
13600 defined in regular objects. For the normal shared case, discard
13601 space for pc-relative relocs that have become local due to symbol
13602 visibility changes. */
13603
13604 if (info->shared || htab->root.is_relocatable_executable)
13605 {
13606 /* Relocs that use pc_count are PC-relative forms, which will appear
13607 on something like ".long foo - ." or "movw REG, foo - .". We want
13608 calls to protected symbols to resolve directly to the function
13609 rather than going via the plt. If people want function pointer
13610 comparisons to work as expected then they should avoid writing
13611 assembly like ".long foo - .". */
13612 if (SYMBOL_CALLS_LOCAL (info, h))
13613 {
13614 struct elf_dyn_relocs **pp;
13615
13616 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13617 {
13618 p->count -= p->pc_count;
13619 p->pc_count = 0;
13620 if (p->count == 0)
13621 *pp = p->next;
13622 else
13623 pp = &p->next;
13624 }
13625 }
13626
13627 if (htab->vxworks_p)
13628 {
13629 struct elf_dyn_relocs **pp;
13630
13631 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13632 {
13633 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13634 *pp = p->next;
13635 else
13636 pp = &p->next;
13637 }
13638 }
13639
13640 /* Also discard relocs on undefined weak syms with non-default
13641 visibility. */
13642 if (eh->dyn_relocs != NULL
13643 && h->root.type == bfd_link_hash_undefweak)
13644 {
13645 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13646 eh->dyn_relocs = NULL;
13647
13648 /* Make sure undefined weak symbols are output as dynamic
13649 symbols in PIEs. */
13650 else if (h->dynindx == -1
13651 && !h->forced_local)
13652 {
13653 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13654 return FALSE;
13655 }
13656 }
13657
13658 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13659 && h->root.type == bfd_link_hash_new)
13660 {
13661 /* Output absolute symbols so that we can create relocations
13662 against them. For normal symbols we output a relocation
13663 against the section that contains them. */
13664 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13665 return FALSE;
13666 }
13667
13668 }
13669 else
13670 {
13671 /* For the non-shared case, discard space for relocs against
13672 symbols which turn out to need copy relocs or are not
13673 dynamic. */
13674
13675 if (!h->non_got_ref
13676 && ((h->def_dynamic
13677 && !h->def_regular)
13678 || (htab->root.dynamic_sections_created
13679 && (h->root.type == bfd_link_hash_undefweak
13680 || h->root.type == bfd_link_hash_undefined))))
13681 {
13682 /* Make sure this symbol is output as a dynamic symbol.
13683 Undefined weak syms won't yet be marked as dynamic. */
13684 if (h->dynindx == -1
13685 && !h->forced_local)
13686 {
13687 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13688 return FALSE;
13689 }
13690
13691 /* If that succeeded, we know we'll be keeping all the
13692 relocs. */
13693 if (h->dynindx != -1)
13694 goto keep;
13695 }
13696
13697 eh->dyn_relocs = NULL;
13698
13699 keep: ;
13700 }
13701
13702 /* Finally, allocate space. */
13703 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13704 {
13705 asection *sreloc = elf_section_data (p->sec)->sreloc;
13706 if (h->type == STT_GNU_IFUNC
13707 && eh->plt.noncall_refcount == 0
13708 && SYMBOL_REFERENCES_LOCAL (info, h))
13709 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13710 else
13711 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13712 }
13713
13714 return TRUE;
13715 }
13716
13717 /* Find any dynamic relocs that apply to read-only sections. */
13718
13719 static bfd_boolean
13720 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13721 {
13722 struct elf32_arm_link_hash_entry * eh;
13723 struct elf_dyn_relocs * p;
13724
13725 eh = (struct elf32_arm_link_hash_entry *) h;
13726 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13727 {
13728 asection *s = p->sec;
13729
13730 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13731 {
13732 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13733
13734 info->flags |= DF_TEXTREL;
13735
13736 /* Not an error, just cut short the traversal. */
13737 return FALSE;
13738 }
13739 }
13740 return TRUE;
13741 }
13742
13743 void
13744 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13745 int byteswap_code)
13746 {
13747 struct elf32_arm_link_hash_table *globals;
13748
13749 globals = elf32_arm_hash_table (info);
13750 if (globals == NULL)
13751 return;
13752
13753 globals->byteswap_code = byteswap_code;
13754 }
13755
13756 /* Set the sizes of the dynamic sections. */
13757
13758 static bfd_boolean
13759 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13760 struct bfd_link_info * info)
13761 {
13762 bfd * dynobj;
13763 asection * s;
13764 bfd_boolean plt;
13765 bfd_boolean relocs;
13766 bfd *ibfd;
13767 struct elf32_arm_link_hash_table *htab;
13768
13769 htab = elf32_arm_hash_table (info);
13770 if (htab == NULL)
13771 return FALSE;
13772
13773 dynobj = elf_hash_table (info)->dynobj;
13774 BFD_ASSERT (dynobj != NULL);
13775 check_use_blx (htab);
13776
13777 if (elf_hash_table (info)->dynamic_sections_created)
13778 {
13779 /* Set the contents of the .interp section to the interpreter. */
13780 if (info->executable)
13781 {
13782 s = bfd_get_linker_section (dynobj, ".interp");
13783 BFD_ASSERT (s != NULL);
13784 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13785 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13786 }
13787 }
13788
13789 /* Set up .got offsets for local syms, and space for local dynamic
13790 relocs. */
13791 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
13792 {
13793 bfd_signed_vma *local_got;
13794 bfd_signed_vma *end_local_got;
13795 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13796 char *local_tls_type;
13797 bfd_vma *local_tlsdesc_gotent;
13798 bfd_size_type locsymcount;
13799 Elf_Internal_Shdr *symtab_hdr;
13800 asection *srel;
13801 bfd_boolean is_vxworks = htab->vxworks_p;
13802 unsigned int symndx;
13803
13804 if (! is_arm_elf (ibfd))
13805 continue;
13806
13807 for (s = ibfd->sections; s != NULL; s = s->next)
13808 {
13809 struct elf_dyn_relocs *p;
13810
13811 for (p = (struct elf_dyn_relocs *)
13812 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13813 {
13814 if (!bfd_is_abs_section (p->sec)
13815 && bfd_is_abs_section (p->sec->output_section))
13816 {
13817 /* Input section has been discarded, either because
13818 it is a copy of a linkonce section or due to
13819 linker script /DISCARD/, so we'll be discarding
13820 the relocs too. */
13821 }
13822 else if (is_vxworks
13823 && strcmp (p->sec->output_section->name,
13824 ".tls_vars") == 0)
13825 {
13826 /* Relocations in vxworks .tls_vars sections are
13827 handled specially by the loader. */
13828 }
13829 else if (p->count != 0)
13830 {
13831 srel = elf_section_data (p->sec)->sreloc;
13832 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13833 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13834 info->flags |= DF_TEXTREL;
13835 }
13836 }
13837 }
13838
13839 local_got = elf_local_got_refcounts (ibfd);
13840 if (!local_got)
13841 continue;
13842
13843 symtab_hdr = & elf_symtab_hdr (ibfd);
13844 locsymcount = symtab_hdr->sh_info;
13845 end_local_got = local_got + locsymcount;
13846 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13847 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13848 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13849 symndx = 0;
13850 s = htab->root.sgot;
13851 srel = htab->root.srelgot;
13852 for (; local_got < end_local_got;
13853 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13854 ++local_tlsdesc_gotent, ++symndx)
13855 {
13856 *local_tlsdesc_gotent = (bfd_vma) -1;
13857 local_iplt = *local_iplt_ptr;
13858 if (local_iplt != NULL)
13859 {
13860 struct elf_dyn_relocs *p;
13861
13862 if (local_iplt->root.refcount > 0)
13863 {
13864 elf32_arm_allocate_plt_entry (info, TRUE,
13865 &local_iplt->root,
13866 &local_iplt->arm);
13867 if (local_iplt->arm.noncall_refcount == 0)
13868 /* All references to the PLT are calls, so all
13869 non-call references can resolve directly to the
13870 run-time target. This means that the .got entry
13871 would be the same as the .igot.plt entry, so there's
13872 no point creating both. */
13873 *local_got = 0;
13874 }
13875 else
13876 {
13877 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13878 local_iplt->root.offset = (bfd_vma) -1;
13879 }
13880
13881 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13882 {
13883 asection *psrel;
13884
13885 psrel = elf_section_data (p->sec)->sreloc;
13886 if (local_iplt->arm.noncall_refcount == 0)
13887 elf32_arm_allocate_irelocs (info, psrel, p->count);
13888 else
13889 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13890 }
13891 }
13892 if (*local_got > 0)
13893 {
13894 Elf_Internal_Sym *isym;
13895
13896 *local_got = s->size;
13897 if (*local_tls_type & GOT_TLS_GD)
13898 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13899 s->size += 8;
13900 if (*local_tls_type & GOT_TLS_GDESC)
13901 {
13902 *local_tlsdesc_gotent = htab->root.sgotplt->size
13903 - elf32_arm_compute_jump_table_size (htab);
13904 htab->root.sgotplt->size += 8;
13905 *local_got = (bfd_vma) -2;
13906 /* plt.got_offset needs to know there's a TLS_DESC
13907 reloc in the middle of .got.plt. */
13908 htab->num_tls_desc++;
13909 }
13910 if (*local_tls_type & GOT_TLS_IE)
13911 s->size += 4;
13912
13913 if (*local_tls_type & GOT_NORMAL)
13914 {
13915 /* If the symbol is both GD and GDESC, *local_got
13916 may have been overwritten. */
13917 *local_got = s->size;
13918 s->size += 4;
13919 }
13920
13921 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13922 if (isym == NULL)
13923 return FALSE;
13924
13925 /* If all references to an STT_GNU_IFUNC PLT are calls,
13926 then all non-call references, including this GOT entry,
13927 resolve directly to the run-time target. */
13928 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13929 && (local_iplt == NULL
13930 || local_iplt->arm.noncall_refcount == 0))
13931 elf32_arm_allocate_irelocs (info, srel, 1);
13932 else if (info->shared || output_bfd->flags & DYNAMIC)
13933 {
13934 if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13935 || *local_tls_type & GOT_TLS_GD)
13936 elf32_arm_allocate_dynrelocs (info, srel, 1);
13937
13938 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13939 {
13940 elf32_arm_allocate_dynrelocs (info,
13941 htab->root.srelplt, 1);
13942 htab->tls_trampoline = -1;
13943 }
13944 }
13945 }
13946 else
13947 *local_got = (bfd_vma) -1;
13948 }
13949 }
13950
13951 if (htab->tls_ldm_got.refcount > 0)
13952 {
13953 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13954 for R_ARM_TLS_LDM32 relocations. */
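/* (The pair of GOT words holds the module ID, which needs a dynamic relocation when building a shared object, and a zero offset.) */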
13955 htab->tls_ldm_got.offset = htab->root.sgot->size;
13956 htab->root.sgot->size += 8;
13957 if (info->shared)
13958 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13959 }
13960 else
13961 htab->tls_ldm_got.offset = -1;
13962
13963 /* Allocate global sym .plt and .got entries, and space for global
13964 sym dynamic relocs. */
13965 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13966
13967 /* Here we rummage through the found bfds to collect glue information. */
13968 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
13969 {
13970 if (! is_arm_elf (ibfd))
13971 continue;
13972
13973 /* Initialise mapping tables for code/data. */
13974 bfd_elf32_arm_init_maps (ibfd);
13975
13976 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13977 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13978 /* xgettext:c-format */
13979 _bfd_error_handler (_("Errors encountered processing file %s"),
13980 ibfd->filename);
13981 }
13982
13983 /* Allocate space for the glue sections now that we've sized them. */
13984 bfd_elf32_arm_allocate_interworking_sections (info);
13985
13986 /* For every jump slot reserved in the sgotplt, reloc_count is
13987 incremented. However, when we reserve space for TLS descriptors,
13988 it's not incremented, so in order to compute the space reserved
13989 for them, it suffices to multiply the reloc count by the jump
13990 slot size. */
13991 if (htab->root.srelplt)
13992 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);
13993
13994 if (htab->tls_trampoline)
13995 {
13996 if (htab->root.splt->size == 0)
13997 htab->root.splt->size += htab->plt_header_size;
13998
13999 htab->tls_trampoline = htab->root.splt->size;
14000 htab->root.splt->size += htab->plt_entry_size;
14001
14002 /* If we're not using lazy TLS relocations, don't generate the
14003 PLT and GOT entries they require. */
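/* (These offsets are later advertised to the dynamic linker through the DT_TLSDESC_GOT and DT_TLSDESC_PLT dynamic tags.) */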
14004 if (!(info->flags & DF_BIND_NOW))
14005 {
14006 htab->dt_tlsdesc_got = htab->root.sgot->size;
14007 htab->root.sgot->size += 4;
14008
14009 htab->dt_tlsdesc_plt = htab->root.splt->size;
14010 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
14011 }
14012 }
14013
14014 /* The check_relocs and adjust_dynamic_symbol entry points have
14015 determined the sizes of the various dynamic sections. Allocate
14016 memory for them. */
14017 plt = FALSE;
14018 relocs = FALSE;
14019 for (s = dynobj->sections; s != NULL; s = s->next)
14020 {
14021 const char * name;
14022
14023 if ((s->flags & SEC_LINKER_CREATED) == 0)
14024 continue;
14025
14026 /* It's OK to base decisions on the section name, because none
14027 of the dynobj section names depend upon the input files. */
14028 name = bfd_get_section_name (dynobj, s);
14029
14030 if (s == htab->root.splt)
14031 {
14032 /* Remember whether there is a PLT. */
14033 plt = s->size != 0;
14034 }
14035 else if (CONST_STRNEQ (name, ".rel"))
14036 {
14037 if (s->size != 0)
14038 {
14039 /* Remember whether there are any reloc sections other
14040 than .rel(a).plt and .rela.plt.unloaded. */
14041 if (s != htab->root.srelplt && s != htab->srelplt2)
14042 relocs = TRUE;
14043
14044 /* We use the reloc_count field as a counter if we need
14045 to copy relocs into the output file. */
14046 s->reloc_count = 0;
14047 }
14048 }
14049 else if (s != htab->root.sgot
14050 && s != htab->root.sgotplt
14051 && s != htab->root.iplt
14052 && s != htab->root.igotplt
14053 && s != htab->sdynbss)
14054 {
14055 /* It's not one of our sections, so don't allocate space. */
14056 continue;
14057 }
14058
14059 if (s->size == 0)
14060 {
14061 /* If we don't need this section, strip it from the
14062 output file. This is mostly to handle .rel(a).bss and
14063 .rel(a).plt. We must create both sections in
14064 create_dynamic_sections, because they must be created
14065 before the linker maps input sections to output
14066 sections. The linker does that before
14067 adjust_dynamic_symbol is called, and it is that
14068 function which decides whether anything needs to go
14069 into these sections. */
14070 s->flags |= SEC_EXCLUDE;
14071 continue;
14072 }
14073
14074 if ((s->flags & SEC_HAS_CONTENTS) == 0)
14075 continue;
14076
14077 /* Allocate memory for the section contents. */
14078 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
14079 if (s->contents == NULL)
14080 return FALSE;
14081 }
14082
14083 if (elf_hash_table (info)->dynamic_sections_created)
14084 {
14085 /* Add some entries to the .dynamic section. We fill in the
14086 values later, in elf32_arm_finish_dynamic_sections, but we
14087 must add the entries now so that we get the correct size for
14088 the .dynamic section. The DT_DEBUG entry is filled in by the
14089 dynamic linker and used by the debugger. */
14090 #define add_dynamic_entry(TAG, VAL) \
14091 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
14092
14093 if (info->executable)
14094 {
14095 if (!add_dynamic_entry (DT_DEBUG, 0))
14096 return FALSE;
14097 }
14098
14099 if (plt)
14100 {
14101 if ( !add_dynamic_entry (DT_PLTGOT, 0)
14102 || !add_dynamic_entry (DT_PLTRELSZ, 0)
14103 || !add_dynamic_entry (DT_PLTREL,
14104 htab->use_rel ? DT_REL : DT_RELA)
14105 || !add_dynamic_entry (DT_JMPREL, 0))
14106 return FALSE;
14107
14108 if (htab->dt_tlsdesc_plt
14109 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
14110 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
14111 return FALSE;
14112 }
14113
14114 if (relocs)
14115 {
14116 if (htab->use_rel)
14117 {
14118 if (!add_dynamic_entry (DT_REL, 0)
14119 || !add_dynamic_entry (DT_RELSZ, 0)
14120 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
14121 return FALSE;
14122 }
14123 else
14124 {
14125 if (!add_dynamic_entry (DT_RELA, 0)
14126 || !add_dynamic_entry (DT_RELASZ, 0)
14127 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
14128 return FALSE;
14129 }
14130 }
14131
14132 /* If any dynamic relocs apply to a read-only section,
14133 then we need a DT_TEXTREL entry. */
14134 if ((info->flags & DF_TEXTREL) == 0)
14135 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
14136 info);
14137
14138 if ((info->flags & DF_TEXTREL) != 0)
14139 {
14140 if (!add_dynamic_entry (DT_TEXTREL, 0))
14141 return FALSE;
14142 }
14143 if (htab->vxworks_p
14144 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
14145 return FALSE;
14146 }
14147 #undef add_dynamic_entry
14148
14149 return TRUE;
14150 }
14151
14152 /* Size sections even though they're not dynamic. We use this hook
14153 to set up _TLS_MODULE_BASE_, if needed. */
14154
14155 static bfd_boolean
14156 elf32_arm_always_size_sections (bfd *output_bfd,
14157 struct bfd_link_info *info)
14158 {
14159 asection *tls_sec;
14160
14161 if (info->relocatable)
14162 return TRUE;
14163
14164 tls_sec = elf_hash_table (info)->tls_sec;
14165
14166 if (tls_sec)
14167 {
14168 struct elf_link_hash_entry *tlsbase;
14169
14170 tlsbase = elf_link_hash_lookup
14171 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
14172
14173 if (tlsbase)
14174 {
14175 struct bfd_link_hash_entry *bh = NULL;
14176 const struct elf_backend_data *bed
14177 = get_elf_backend_data (output_bfd);
14178
14179 if (!(_bfd_generic_link_add_one_symbol
14180 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
14181 tls_sec, 0, NULL, FALSE,
14182 bed->collect, &bh)))
14183 return FALSE;
14184
14185 tlsbase->type = STT_TLS;
14186 tlsbase = (struct elf_link_hash_entry *) bh;
14187 tlsbase->def_regular = 1;
14188 tlsbase->other = STV_HIDDEN;
14189 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
14190 }
14191 }
14192 return TRUE;
14193 }
14194
14195 /* Finish up dynamic symbol handling. We set the contents of various
14196 dynamic sections here. */
14197
14198 static bfd_boolean
14199 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
14200 struct bfd_link_info * info,
14201 struct elf_link_hash_entry * h,
14202 Elf_Internal_Sym * sym)
14203 {
14204 struct elf32_arm_link_hash_table *htab;
14205 struct elf32_arm_link_hash_entry *eh;
14206
14207 htab = elf32_arm_hash_table (info);
14208 if (htab == NULL)
14209 return FALSE;
14210
14211 eh = (struct elf32_arm_link_hash_entry *) h;
14212
14213 if (h->plt.offset != (bfd_vma) -1)
14214 {
14215 if (!eh->is_iplt)
14216 {
14217 BFD_ASSERT (h->dynindx != -1);
14218 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
14219 h->dynindx, 0))
14220 return FALSE;
14221 }
14222
14223 if (!h->def_regular)
14224 {
14225 /* Mark the symbol as undefined, rather than as defined in
14226 the .plt section. */
14227 sym->st_shndx = SHN_UNDEF;
14228 /* If the symbol is weak we need to clear the value.
14229 Otherwise, the PLT entry would provide a definition for
14230 the symbol even if the symbol wasn't defined anywhere,
14231 and so the symbol would never be NULL. Leave the value if
14232 there were any relocations where pointer equality matters
14233 (this is a clue for the dynamic linker, to make function
14234 pointer comparisons work between an application and shared
14235 library). */
14236 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
14237 sym->st_value = 0;
14238 }
14239 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
14240 {
14241 /* At least one non-call relocation references this .iplt entry,
14242 so the .iplt entry is the function's canonical address. */
14243 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
14244 sym->st_target_internal = ST_BRANCH_TO_ARM;
14245 sym->st_shndx = (_bfd_elf_section_from_bfd_section
14246 (output_bfd, htab->root.iplt->output_section));
14247 sym->st_value = (h->plt.offset
14248 + htab->root.iplt->output_section->vma
14249 + htab->root.iplt->output_offset);
14250 }
14251 }
14252
14253 if (h->needs_copy)
14254 {
14255 asection * s;
14256 Elf_Internal_Rela rel;
14257
14258 /* This symbol needs a copy reloc. Set it up. */
14259 BFD_ASSERT (h->dynindx != -1
14260 && (h->root.type == bfd_link_hash_defined
14261 || h->root.type == bfd_link_hash_defweak));
14262
14263 s = htab->srelbss;
14264 BFD_ASSERT (s != NULL);
14265
14266 rel.r_addend = 0;
14267 rel.r_offset = (h->root.u.def.value
14268 + h->root.u.def.section->output_section->vma
14269 + h->root.u.def.section->output_offset);
14270 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
14271 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
14272 }
14273
14274 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
14275 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
14276 to the ".got" section. */
14277 if (h == htab->root.hdynamic
14278 || (!htab->vxworks_p && h == htab->root.hgot))
14279 sym->st_shndx = SHN_ABS;
14280
14281 return TRUE;
14282 }
14283
14284 static void
14285 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14286 void *contents,
14287 const unsigned long *template, unsigned count)
14288 {
14289 unsigned ix;
14290
14291 for (ix = 0; ix != count; ix++)
14292 {
14293 unsigned long insn = template[ix];
14294
14295 /* Emit mov pc,rx if bx is not permitted. */
14296 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14297 insn = (insn & 0xf000000f) | 0x01a0f000;
14298 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14299 }
14300 }
14301
14302 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
14303 other variants, NaCl needs this entry in a static executable's
14304 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
14305 zero. For .iplt really only the last bundle is useful, and .iplt
14306 could have a shorter first entry, with each individual PLT entry's
14307 relative branch calculated differently so it targets the last
14308 bundle instead of the instruction before it (labelled .Lplt_tail
14309 above). But it's simpler to keep the size and layout of PLT0
14310 consistent with the dynamic case, at the cost of some dead code at
14311 the start of .iplt and the one dead store to the stack at the start
14312 of .Lplt_tail. */
14313 static void
14314 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
14315 asection *plt, bfd_vma got_displacement)
14316 {
14317 unsigned int i;
14318
14319 put_arm_insn (htab, output_bfd,
14320 elf32_arm_nacl_plt0_entry[0]
14321 | arm_movw_immediate (got_displacement),
14322 plt->contents + 0);
14323 put_arm_insn (htab, output_bfd,
14324 elf32_arm_nacl_plt0_entry[1]
14325 | arm_movt_immediate (got_displacement),
14326 plt->contents + 4);
14327
14328 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14329 put_arm_insn (htab, output_bfd,
14330 elf32_arm_nacl_plt0_entry[i],
14331 plt->contents + (i * 4));
14332 }
14333
14334 /* Finish up the dynamic sections. */
14335
14336 static bfd_boolean
14337 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14338 {
14339 bfd * dynobj;
14340 asection * sgot;
14341 asection * sdyn;
14342 struct elf32_arm_link_hash_table *htab;
14343
14344 htab = elf32_arm_hash_table (info);
14345 if (htab == NULL)
14346 return FALSE;
14347
14348 dynobj = elf_hash_table (info)->dynobj;
14349
14350 sgot = htab->root.sgotplt;
14351 /* A broken linker script might have discarded the dynamic sections.
14352 Catch this here so that we do not seg-fault later on. */
14353 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14354 return FALSE;
14355 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
14356
14357 if (elf_hash_table (info)->dynamic_sections_created)
14358 {
14359 asection *splt;
14360 Elf32_External_Dyn *dyncon, *dynconend;
14361
14362 splt = htab->root.splt;
14363 BFD_ASSERT (splt != NULL && sdyn != NULL);
14364 BFD_ASSERT (htab->symbian_p || sgot != NULL);
14365
14366 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14367 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14368
14369 for (; dyncon < dynconend; dyncon++)
14370 {
14371 Elf_Internal_Dyn dyn;
14372 const char * name;
14373 asection * s;
14374
14375 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
14376
14377 switch (dyn.d_tag)
14378 {
14379 unsigned int type;
14380
14381 default:
14382 if (htab->vxworks_p
14383 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14384 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14385 break;
14386
14387 case DT_HASH:
14388 name = ".hash";
14389 goto get_vma_if_bpabi;
14390 case DT_STRTAB:
14391 name = ".dynstr";
14392 goto get_vma_if_bpabi;
14393 case DT_SYMTAB:
14394 name = ".dynsym";
14395 goto get_vma_if_bpabi;
14396 case DT_VERSYM:
14397 name = ".gnu.version";
14398 goto get_vma_if_bpabi;
14399 case DT_VERDEF:
14400 name = ".gnu.version_d";
14401 goto get_vma_if_bpabi;
14402 case DT_VERNEED:
14403 name = ".gnu.version_r";
14404 goto get_vma_if_bpabi;
14405
14406 case DT_PLTGOT:
14407 name = ".got";
14408 goto get_vma;
14409 case DT_JMPREL:
14410 name = RELOC_SECTION (htab, ".plt");
14411 get_vma:
14412 s = bfd_get_section_by_name (output_bfd, name);
14413 if (s == NULL)
14414 {
14415 /* PR ld/14397: Issue an error message if a required section is missing. */
14416 (*_bfd_error_handler)
14417 (_("error: required section '%s' not found in the linker script"), name);
14418 bfd_set_error (bfd_error_invalid_operation);
14419 return FALSE;
14420 }
14421 if (!htab->symbian_p)
14422 dyn.d_un.d_ptr = s->vma;
14423 else
14424 /* In the BPABI, tags in the PT_DYNAMIC section point
14425 at the file offset, not the memory address, for the
14426 convenience of the post linker. */
14427 dyn.d_un.d_ptr = s->filepos;
14428 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14429 break;
14430
14431 get_vma_if_bpabi:
14432 if (htab->symbian_p)
14433 goto get_vma;
14434 break;
14435
14436 case DT_PLTRELSZ:
14437 s = htab->root.srelplt;
14438 BFD_ASSERT (s != NULL);
14439 dyn.d_un.d_val = s->size;
14440 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14441 break;
14442
14443 case DT_RELSZ:
14444 case DT_RELASZ:
14445 if (!htab->symbian_p)
14446 {
14447 /* My reading of the SVR4 ABI indicates that the
14448 procedure linkage table relocs (DT_JMPREL) should be
14449 included in the overall relocs (DT_REL). This is
14450 what Solaris does. However, UnixWare can not handle
14451 that case. Therefore, we override the DT_RELSZ entry
14452 here to make it not include the JMPREL relocs. Since
14453 the linker script arranges for .rel(a).plt to follow all
14454 other relocation sections, we don't have to worry
14455 about changing the DT_REL entry. */
14456 s = htab->root.srelplt;
14457 if (s != NULL)
14458 dyn.d_un.d_val -= s->size;
14459 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14460 break;
14461 }
14462 /* Fall through. */
14463
14464 case DT_REL:
14465 case DT_RELA:
14466 /* In the BPABI, the DT_REL tag must point at the file
14467 offset, not the VMA, of the first relocation
14468 section. So, we use code similar to that in
14469 elflink.c, but do not check for SHF_ALLOC on the
14470 relocation section, since relocation sections are
14471 never allocated under the BPABI. The comments above
14472 about UnixWare notwithstanding, we include all of the
14473 relocations here. */
14474 if (htab->symbian_p)
14475 {
14476 unsigned int i;
14477 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14478 ? SHT_REL : SHT_RELA);
14479 dyn.d_un.d_val = 0;
14480 for (i = 1; i < elf_numsections (output_bfd); i++)
14481 {
14482 Elf_Internal_Shdr *hdr
14483 = elf_elfsections (output_bfd)[i];
14484 if (hdr->sh_type == type)
14485 {
14486 if (dyn.d_tag == DT_RELSZ
14487 || dyn.d_tag == DT_RELASZ)
14488 dyn.d_un.d_val += hdr->sh_size;
14489 else if ((ufile_ptr) hdr->sh_offset
14490 <= dyn.d_un.d_val - 1)
14491 dyn.d_un.d_val = hdr->sh_offset;
14492 }
14493 }
14494 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14495 }
14496 break;
14497
14498 case DT_TLSDESC_PLT:
14499 s = htab->root.splt;
14500 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14501 + htab->dt_tlsdesc_plt);
14502 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14503 break;
14504
14505 case DT_TLSDESC_GOT:
14506 s = htab->root.sgot;
14507 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14508 + htab->dt_tlsdesc_got);
14509 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14510 break;
14511
14512 /* Set the bottom bit of DT_INIT/FINI if the
14513 corresponding function is Thumb. */
14514 case DT_INIT:
14515 name = info->init_function;
14516 goto get_sym;
14517 case DT_FINI:
14518 name = info->fini_function;
14519 get_sym:
14520 /* If it wasn't set by elf_bfd_final_link
14521 then there is nothing to adjust. */
14522 if (dyn.d_un.d_val != 0)
14523 {
14524 struct elf_link_hash_entry * eh;
14525
14526 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14527 FALSE, FALSE, TRUE);
14528 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14529 {
14530 dyn.d_un.d_val |= 1;
14531 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14532 }
14533 }
14534 break;
14535 }
14536 }
14537
14538 /* Fill in the first entry in the procedure linkage table. */
14539 if (splt->size > 0 && htab->plt_header_size)
14540 {
14541 const bfd_vma *plt0_entry;
14542 bfd_vma got_address, plt_address, got_displacement;
14543
14544 /* Calculate the addresses of the GOT and PLT. */
14545 got_address = sgot->output_section->vma + sgot->output_offset;
14546 plt_address = splt->output_section->vma + splt->output_offset;
14547
14548 if (htab->vxworks_p)
14549 {
14550 /* The VxWorks GOT is relocated by the dynamic linker.
14551 Therefore, we must emit relocations rather than simply
14552 computing the values now. */
14553 Elf_Internal_Rela rel;
14554
14555 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14556 put_arm_insn (htab, output_bfd, plt0_entry[0],
14557 splt->contents + 0);
14558 put_arm_insn (htab, output_bfd, plt0_entry[1],
14559 splt->contents + 4);
14560 put_arm_insn (htab, output_bfd, plt0_entry[2],
14561 splt->contents + 8);
14562 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14563
14564 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14565 rel.r_offset = plt_address + 12;
14566 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14567 rel.r_addend = 0;
14568 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14569 htab->srelplt2->contents);
14570 }
14571 else if (htab->nacl_p)
14572 arm_nacl_put_plt0 (htab, output_bfd, splt,
14573 got_address + 8 - (plt_address + 16));
14574 else if (using_thumb_only (htab))
14575 {
14576 got_displacement = got_address - (plt_address + 12);
14577
14578 plt0_entry = elf32_thumb2_plt0_entry;
14579 put_arm_insn (htab, output_bfd, plt0_entry[0],
14580 splt->contents + 0);
14581 put_arm_insn (htab, output_bfd, plt0_entry[1],
14582 splt->contents + 4);
14583 put_arm_insn (htab, output_bfd, plt0_entry[2],
14584 splt->contents + 8);
14585
14586 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
14587 }
14588 else
14589 {
14590 got_displacement = got_address - (plt_address + 16);
14591
14592 plt0_entry = elf32_arm_plt0_entry;
14593 put_arm_insn (htab, output_bfd, plt0_entry[0],
14594 splt->contents + 0);
14595 put_arm_insn (htab, output_bfd, plt0_entry[1],
14596 splt->contents + 4);
14597 put_arm_insn (htab, output_bfd, plt0_entry[2],
14598 splt->contents + 8);
14599 put_arm_insn (htab, output_bfd, plt0_entry[3],
14600 splt->contents + 12);
14601
14602 #ifdef FOUR_WORD_PLT
14603 /* The displacement value goes in the otherwise-unused
14604 last word of the second entry. */
14605 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14606 #else
14607 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
14608 #endif
14609 }
14610 }
14611
14612 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14613 really seem like the right value. */
14614 if (splt->output_section->owner == output_bfd)
14615 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
14616
14617 if (htab->dt_tlsdesc_plt)
14618 {
14619 bfd_vma got_address
14620 = sgot->output_section->vma + sgot->output_offset;
14621 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14622 + htab->root.sgot->output_offset);
14623 bfd_vma plt_address
14624 = splt->output_section->vma + splt->output_offset;
14625
14626 arm_put_trampoline (htab, output_bfd,
14627 splt->contents + htab->dt_tlsdesc_plt,
14628 dl_tlsdesc_lazy_trampoline, 6);
14629
14630 bfd_put_32 (output_bfd,
14631 gotplt_address + htab->dt_tlsdesc_got
14632 - (plt_address + htab->dt_tlsdesc_plt)
14633 - dl_tlsdesc_lazy_trampoline[6],
14634 splt->contents + htab->dt_tlsdesc_plt + 24);
14635 bfd_put_32 (output_bfd,
14636 got_address - (plt_address + htab->dt_tlsdesc_plt)
14637 - dl_tlsdesc_lazy_trampoline[7],
14638 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14639 }
14640
14641 if (htab->tls_trampoline)
14642 {
14643 arm_put_trampoline (htab, output_bfd,
14644 splt->contents + htab->tls_trampoline,
14645 tls_trampoline, 3);
14646 #ifdef FOUR_WORD_PLT
14647 bfd_put_32 (output_bfd, 0x00000000,
14648 splt->contents + htab->tls_trampoline + 12);
14649 #endif
14650 }
14651
14652 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14653 {
14654 /* Correct the .rel(a).plt.unloaded relocations. They will have
14655 incorrect symbol indexes. */
14656 int num_plts;
14657 unsigned char *p;
14658
14659 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14660 / htab->plt_entry_size);
14661 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14662
14663 for (; num_plts; num_plts--)
14664 {
14665 Elf_Internal_Rela rel;
14666
14667 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14668 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14669 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14670 p += RELOC_SIZE (htab);
14671
14672 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14673 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14674 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14675 p += RELOC_SIZE (htab);
14676 }
14677 }
14678 }
14679
14680 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
14681 /* NaCl uses a special first entry in .iplt too. */
14682 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
14683
14684 /* Fill in the first three entries in the global offset table. */
14685 if (sgot)
14686 {
14687 if (sgot->size > 0)
14688 {
14689 if (sdyn == NULL)
14690 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14691 else
14692 bfd_put_32 (output_bfd,
14693 sdyn->output_section->vma + sdyn->output_offset,
14694 sgot->contents);
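/* The next two GOT words are reserved for the dynamic linker,
which conventionally stores its link-map pointer and the address
of its lazy-resolution routine there at run time; we simply
clear them here.  */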
14695 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14696 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14697 }
14698
14699 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
14700 }
14701
14702 return TRUE;
14703 }
14704
14705 static void
14706 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14707 {
14708 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14709 struct elf32_arm_link_hash_table *globals;
14710
14711 i_ehdrp = elf_elfheader (abfd);
14712
14713 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14714 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14715 else
14716 _bfd_elf_post_process_headers (abfd, link_info);
14717 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14718
14719 if (link_info)
14720 {
14721 globals = elf32_arm_hash_table (link_info);
14722 if (globals != NULL && globals->byteswap_code)
14723 i_ehdrp->e_flags |= EF_ARM_BE8;
14724 }
14725
14726 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
14727 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
14728 {
14729 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
14730 if (abi == AEABI_VFP_args_vfp)
14731 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
14732 else
14733 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
14734 }
14735 }
14736
14737 static enum elf_reloc_type_class
14738 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
14739 const asection *rel_sec ATTRIBUTE_UNUSED,
14740 const Elf_Internal_Rela *rela)
14741 {
14742 switch ((int) ELF32_R_TYPE (rela->r_info))
14743 {
14744 case R_ARM_RELATIVE:
14745 return reloc_class_relative;
14746 case R_ARM_JUMP_SLOT:
14747 return reloc_class_plt;
14748 case R_ARM_COPY:
14749 return reloc_class_copy;
14750 default:
14751 return reloc_class_normal;
14752 }
14753 }
14754
14755 static void
14756 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14757 {
14758 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14759 }
14760
14761 /* Return TRUE if this is an unwinding table entry. */
14762
14763 static bfd_boolean
14764 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14765 {
14766 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14767 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14768 }
14769
14770
14771 /* Set the type and flags for an ARM section. We do this by
14772 the section name, which is a hack, but ought to work. */
14773
14774 static bfd_boolean
14775 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14776 {
14777 const char * name;
14778
14779 name = bfd_get_section_name (abfd, sec);
14780
14781 if (is_arm_elf_unwind_section_name (abfd, name))
14782 {
14783 hdr->sh_type = SHT_ARM_EXIDX;
14784 hdr->sh_flags |= SHF_LINK_ORDER;
14785 }
14786 return TRUE;
14787 }
14788
14789 /* Handle an ARM specific section when reading an object file. This is
14790 called when bfd_section_from_shdr finds a section with an unknown
14791 type. */
14792
14793 static bfd_boolean
14794 elf32_arm_section_from_shdr (bfd *abfd,
14795 Elf_Internal_Shdr * hdr,
14796 const char *name,
14797 int shindex)
14798 {
14799 /* There ought to be a place to keep ELF backend specific flags, but
14800 at the moment there isn't one. We just keep track of the
14801 sections by their name, instead. Fortunately, the ABI gives
14802 names for all the ARM specific sections, so we will probably get
14803 away with this. */
14804 switch (hdr->sh_type)
14805 {
14806 case SHT_ARM_EXIDX:
14807 case SHT_ARM_PREEMPTMAP:
14808 case SHT_ARM_ATTRIBUTES:
14809 break;
14810
14811 default:
14812 return FALSE;
14813 }
14814
14815 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
14816 return FALSE;
14817
14818 return TRUE;
14819 }
14820
14821 static _arm_elf_section_data *
14822 get_arm_elf_section_data (asection * sec)
14823 {
14824 if (sec && sec->owner && is_arm_elf (sec->owner))
14825 return elf32_arm_section_data (sec);
14826 else
14827 return NULL;
14828 }
14829
14830 typedef struct
14831 {
14832 void *flaginfo;
14833 struct bfd_link_info *info;
14834 asection *sec;
14835 int sec_shndx;
14836 int (*func) (void *, const char *, Elf_Internal_Sym *,
14837 asection *, struct elf_link_hash_entry *);
14838 } output_arch_syminfo;
14839
14840 enum map_symbol_type
14841 {
14842 ARM_MAP_ARM,
14843 ARM_MAP_THUMB,
14844 ARM_MAP_DATA
14845 };
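/* These correspond to the mapping symbols defined by the ARM ELF ABI:
"$a" marks the start of a run of ARM instructions, "$t" the start of
Thumb instructions and "$d" the start of literal data.  Disassemblers
and the BE8 byte-swapping code in elf32_arm_write_section rely on
them to distinguish code from data.  */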
14846
14847
14848 /* Output a single mapping symbol. */
14849
14850 static bfd_boolean
14851 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14852 enum map_symbol_type type,
14853 bfd_vma offset)
14854 {
14855 static const char *names[3] = {"$a", "$t", "$d"};
14856 Elf_Internal_Sym sym;
14857
14858 sym.st_value = osi->sec->output_section->vma
14859 + osi->sec->output_offset
14860 + offset;
14861 sym.st_size = 0;
14862 sym.st_other = 0;
14863 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14864 sym.st_shndx = osi->sec_shndx;
14865 sym.st_target_internal = 0;
14866 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14867 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14868 }
14869
14870 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14871 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
14872
14873 static bfd_boolean
14874 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14875 bfd_boolean is_iplt_entry_p,
14876 union gotplt_union *root_plt,
14877 struct arm_plt_info *arm_plt)
14878 {
14879 struct elf32_arm_link_hash_table *htab;
14880 bfd_vma addr, plt_header_size;
14881
14882 if (root_plt->offset == (bfd_vma) -1)
14883 return TRUE;
14884
14885 htab = elf32_arm_hash_table (osi->info);
14886 if (htab == NULL)
14887 return FALSE;
14888
14889 if (is_iplt_entry_p)
14890 {
14891 osi->sec = htab->root.iplt;
14892 plt_header_size = 0;
14893 }
14894 else
14895 {
14896 osi->sec = htab->root.splt;
14897 plt_header_size = htab->plt_header_size;
14898 }
14899 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14900 (osi->info->output_bfd, osi->sec->output_section));
14901
14902 addr = root_plt->offset & -2;
14903 if (htab->symbian_p)
14904 {
14905 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14906 return FALSE;
14907 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14908 return FALSE;
14909 }
14910 else if (htab->vxworks_p)
14911 {
14912 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14913 return FALSE;
14914 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14915 return FALSE;
14916 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14917 return FALSE;
14918 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14919 return FALSE;
14920 }
14921 else if (htab->nacl_p)
14922 {
14923 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14924 return FALSE;
14925 }
14926 else if (using_thumb_only (htab))
14927 {
14928 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
14929 return FALSE;
14930 }
14931 else
14932 {
14933 bfd_boolean thumb_stub_p;
14934
14935 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
14936 if (thumb_stub_p)
14937 {
14938 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14939 return FALSE;
14940 }
14941 #ifdef FOUR_WORD_PLT
14942 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14943 return FALSE;
14944 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14945 return FALSE;
14946 #else
14947 /* A three-word PLT with no Thumb thunk contains only ARM code, so
14948 we only need to output a mapping symbol for the first PLT entry and
14949 for entries with Thumb thunks. */
14950 if (thumb_stub_p || addr == plt_header_size)
14951 {
14952 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14953 return FALSE;
14954 }
14955 #endif
14956 }
14957
14958 return TRUE;
14959 }
14960
14961 /* Output mapping symbols for PLT entries associated with H. */
14962
14963 static bfd_boolean
14964 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14965 {
14966 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14967 struct elf32_arm_link_hash_entry *eh;
14968
14969 if (h->root.type == bfd_link_hash_indirect)
14970 return TRUE;
14971
14972 if (h->root.type == bfd_link_hash_warning)
14973 /* When warning symbols are created, they **replace** the "real"
14974 entry in the hash table, thus we never get to see the real
14975 symbol in a hash traversal. So look at it now. */
14976 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14977
14978 eh = (struct elf32_arm_link_hash_entry *) h;
14979 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14980 &h->plt, &eh->plt);
14981 }
14982
14983 /* Output a single local symbol for a generated stub. */
14984
14985 static bfd_boolean
14986 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14987 bfd_vma offset, bfd_vma size)
14988 {
14989 Elf_Internal_Sym sym;
14990
14991 sym.st_value = osi->sec->output_section->vma
14992 + osi->sec->output_offset
14993 + offset;
14994 sym.st_size = size;
14995 sym.st_other = 0;
14996 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14997 sym.st_shndx = osi->sec_shndx;
14998 sym.st_target_internal = 0;
14999 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
15000 }
15001
15002 static bfd_boolean
15003 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
15004 void * in_arg)
15005 {
15006 struct elf32_arm_stub_hash_entry *stub_entry;
15007 asection *stub_sec;
15008 bfd_vma addr;
15009 char *stub_name;
15010 output_arch_syminfo *osi;
15011 const insn_sequence *template_sequence;
15012 enum stub_insn_type prev_type;
15013 int size;
15014 int i;
15015 enum map_symbol_type sym_type;
15016
15017 /* Massage our args to the form they really have. */
15018 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15019 osi = (output_arch_syminfo *) in_arg;
15020
15021 stub_sec = stub_entry->stub_sec;
15022
15023 /* Ensure this stub is attached to the current section being
15024 processed. */
15025 if (stub_sec != osi->sec)
15026 return TRUE;
15027
15028 addr = (bfd_vma) stub_entry->stub_offset;
15029 stub_name = stub_entry->output_name;
15030
15031 template_sequence = stub_entry->stub_template;
15032 switch (template_sequence[0].type)
15033 {
15034 case ARM_TYPE:
15035 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
15036 return FALSE;
15037 break;
15038 case THUMB16_TYPE:
15039 case THUMB32_TYPE:
15040 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
15041 stub_entry->stub_size))
15042 return FALSE;
15043 break;
15044 default:
15045 BFD_FAIL ();
15046 return 0;
15047 }
15048
15049 prev_type = DATA_TYPE;
15050 size = 0;
15051 for (i = 0; i < stub_entry->stub_template_size; i++)
15052 {
15053 switch (template_sequence[i].type)
15054 {
15055 case ARM_TYPE:
15056 sym_type = ARM_MAP_ARM;
15057 break;
15058
15059 case THUMB16_TYPE:
15060 case THUMB32_TYPE:
15061 sym_type = ARM_MAP_THUMB;
15062 break;
15063
15064 case DATA_TYPE:
15065 sym_type = ARM_MAP_DATA;
15066 break;
15067
15068 default:
15069 BFD_FAIL ();
15070 return FALSE;
15071 }
15072
15073 if (template_sequence[i].type != prev_type)
15074 {
15075 prev_type = template_sequence[i].type;
15076 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
15077 return FALSE;
15078 }
15079
15080 switch (template_sequence[i].type)
15081 {
15082 case ARM_TYPE:
15083 case THUMB32_TYPE:
15084 size += 4;
15085 break;
15086
15087 case THUMB16_TYPE:
15088 size += 2;
15089 break;
15090
15091 case DATA_TYPE:
15092 size += 4;
15093 break;
15094
15095 default:
15096 BFD_FAIL ();
15097 return FALSE;
15098 }
15099 }
15100
15101 return TRUE;
15102 }
15103
15104 /* Output mapping symbols for linker-generated sections,
15105 and for those data-only sections that do not have a
15106 $d mapping symbol. */
15107
15108 static bfd_boolean
15109 elf32_arm_output_arch_local_syms (bfd *output_bfd,
15110 struct bfd_link_info *info,
15111 void *flaginfo,
15112 int (*func) (void *, const char *,
15113 Elf_Internal_Sym *,
15114 asection *,
15115 struct elf_link_hash_entry *))
15116 {
15117 output_arch_syminfo osi;
15118 struct elf32_arm_link_hash_table *htab;
15119 bfd_vma offset;
15120 bfd_size_type size;
15121 bfd *input_bfd;
15122
15123 htab = elf32_arm_hash_table (info);
15124 if (htab == NULL)
15125 return FALSE;
15126
15127 check_use_blx (htab);
15128
15129 osi.flaginfo = flaginfo;
15130 osi.info = info;
15131 osi.func = func;
15132
15133 /* Add a $d mapping symbol to data-only sections that
15134 don't have any mapping symbol. This may result in (harmless) redundant
15135 mapping symbols. */
15136 for (input_bfd = info->input_bfds;
15137 input_bfd != NULL;
15138 input_bfd = input_bfd->link.next)
15139 {
15140 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
15141 for (osi.sec = input_bfd->sections;
15142 osi.sec != NULL;
15143 osi.sec = osi.sec->next)
15144 {
15145 if (osi.sec->output_section != NULL
15146 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
15147 != 0)
15148 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
15149 == SEC_HAS_CONTENTS
15150 && get_arm_elf_section_data (osi.sec) != NULL
15151 && get_arm_elf_section_data (osi.sec)->mapcount == 0
15152 && osi.sec->size > 0
15153 && (osi.sec->flags & SEC_EXCLUDE) == 0)
15154 {
15155 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15156 (output_bfd, osi.sec->output_section);
15157 if (osi.sec_shndx != (int)SHN_BAD)
15158 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
15159 }
15160 }
15161 }
15162
15163 /* ARM->Thumb glue. */
15164 if (htab->arm_glue_size > 0)
15165 {
15166 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15167 ARM2THUMB_GLUE_SECTION_NAME);
15168
15169 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15170 (output_bfd, osi.sec->output_section);
15171 if (info->shared || htab->root.is_relocatable_executable
15172 || htab->pic_veneer)
15173 size = ARM2THUMB_PIC_GLUE_SIZE;
15174 else if (htab->use_blx)
15175 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
15176 else
15177 size = ARM2THUMB_STATIC_GLUE_SIZE;
15178
15179 for (offset = 0; offset < htab->arm_glue_size; offset += size)
15180 {
15181 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
15182 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
15183 }
15184 }
15185
15186 /* Thumb->ARM glue. */
15187 if (htab->thumb_glue_size > 0)
15188 {
15189 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15190 THUMB2ARM_GLUE_SECTION_NAME);
15191
15192 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15193 (output_bfd, osi.sec->output_section);
15194 size = THUMB2ARM_GLUE_SIZE;
15195
15196 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
15197 {
15198 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
15199 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
15200 }
15201 }
15202
15203 /* ARMv4 BX veneers. */
15204 if (htab->bx_glue_size > 0)
15205 {
15206 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
15207 ARM_BX_GLUE_SECTION_NAME);
15208
15209 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15210 (output_bfd, osi.sec->output_section);
15211
15212 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
15213 }
15214
15215 /* Long call stubs. */
15216 if (htab->stub_bfd && htab->stub_bfd->sections)
15217 {
15218 asection* stub_sec;
15219
15220 for (stub_sec = htab->stub_bfd->sections;
15221 stub_sec != NULL;
15222 stub_sec = stub_sec->next)
15223 {
15224 /* Ignore non-stub sections. */
15225 if (!strstr (stub_sec->name, STUB_SUFFIX))
15226 continue;
15227
15228 osi.sec = stub_sec;
15229
15230 osi.sec_shndx = _bfd_elf_section_from_bfd_section
15231 (output_bfd, osi.sec->output_section);
15232
15233 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
15234 }
15235 }
15236
15237 /* Finally, output mapping symbols for the PLT. */
15238 if (htab->root.splt && htab->root.splt->size > 0)
15239 {
15240 osi.sec = htab->root.splt;
15241 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
15242 (output_bfd, osi.sec->output_section));
15243
15244 /* Output mapping symbols for the plt header. SymbianOS does not have a
15245 plt header. */
15246 if (htab->vxworks_p)
15247 {
15248 /* VxWorks shared libraries have no PLT header. */
15249 if (!info->shared)
15250 {
15251 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15252 return FALSE;
15253 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
15254 return FALSE;
15255 }
15256 }
15257 else if (htab->nacl_p)
15258 {
15259 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15260 return FALSE;
15261 }
15262 else if (using_thumb_only (htab))
15263 {
15264 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
15265 return FALSE;
15266 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
15267 return FALSE;
15268 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
15269 return FALSE;
15270 }
15271 else if (!htab->symbian_p)
15272 {
15273 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15274 return FALSE;
15275 #ifndef FOUR_WORD_PLT
15276 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
15277 return FALSE;
15278 #endif
15279 }
15280 }
15281 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
15282 {
15283 /* NaCl uses a special first entry in .iplt too. */
15284 osi.sec = htab->root.iplt;
15285 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
15286 (output_bfd, osi.sec->output_section));
15287 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
15288 return FALSE;
15289 }
15290 if ((htab->root.splt && htab->root.splt->size > 0)
15291 || (htab->root.iplt && htab->root.iplt->size > 0))
15292 {
15293 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
15294 for (input_bfd = info->input_bfds;
15295 input_bfd != NULL;
15296 input_bfd = input_bfd->link.next)
15297 {
15298 struct arm_local_iplt_info **local_iplt;
15299 unsigned int i, num_syms;
15300
15301 local_iplt = elf32_arm_local_iplt (input_bfd);
15302 if (local_iplt != NULL)
15303 {
15304 num_syms = elf_symtab_hdr (input_bfd).sh_info;
15305 for (i = 0; i < num_syms; i++)
15306 if (local_iplt[i] != NULL
15307 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
15308 &local_iplt[i]->root,
15309 &local_iplt[i]->arm))
15310 return FALSE;
15311 }
15312 }
15313 }
15314 if (htab->dt_tlsdesc_plt != 0)
15315 {
15316 /* Mapping symbols for the lazy tls trampoline. */
15317 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
15318 return FALSE;
15319
15320 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15321 htab->dt_tlsdesc_plt + 24))
15322 return FALSE;
15323 }
15324 if (htab->tls_trampoline != 0)
15325 {
15326 /* Mapping symbols for the tls trampoline. */
15327 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
15328 return FALSE;
15329 #ifdef FOUR_WORD_PLT
15330 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
15331 htab->tls_trampoline + 12))
15332 return FALSE;
15333 #endif
15334 }
15335
15336 return TRUE;
15337 }
15338
15339 /* Allocate target specific section data. */
15340
15341 static bfd_boolean
15342 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
15343 {
15344 if (!sec->used_by_bfd)
15345 {
15346 _arm_elf_section_data *sdata;
15347 bfd_size_type amt = sizeof (*sdata);
15348
15349 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
15350 if (sdata == NULL)
15351 return FALSE;
15352 sec->used_by_bfd = sdata;
15353 }
15354
15355 return _bfd_elf_new_section_hook (abfd, sec);
15356 }
15357
15358
15359 /* Used to order a list of mapping symbols by address. */
15360
15361 static int
15362 elf32_arm_compare_mapping (const void * a, const void * b)
15363 {
15364 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
15365 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
15366
15367 if (amap->vma > bmap->vma)
15368 return 1;
15369 else if (amap->vma < bmap->vma)
15370 return -1;
15371 else if (amap->type > bmap->type)
15372 /* Ensure results do not depend on the host qsort for objects with
15373 multiple mapping symbols at the same address by sorting on type
15374 after vma. */
15375 return 1;
15376 else if (amap->type < bmap->type)
15377 return -1;
15378 else
15379 return 0;
15380 }
15381
15382 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
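/* For example, offset_prel31 (0x7ffffffc, 8) yields 0x00000004:
the 31-bit field wraps from -4 to +4 while bit 31 is preserved.  */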
15383
15384 static unsigned long
15385 offset_prel31 (unsigned long addr, bfd_vma offset)
15386 {
15387 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15388 }
15389
15390 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15391 relocations. */
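/* Each .ARM.exidx entry is a pair of words: the first is a prel31
offset to the function it describes, and the second is either
EXIDX_CANTUNWIND (0x1), an inline unwind description (high bit set),
or a prel31 offset to an .ARM.extab entry.  */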
15392
15393 static void
15394 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15395 {
15396 unsigned long first_word = bfd_get_32 (output_bfd, from);
15397 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15398
15399 /* High bit of first word is supposed to be zero. */
15400 if ((first_word & 0x80000000ul) == 0)
15401 first_word = offset_prel31 (first_word, offset);
15402
15403 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
15404 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15405 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15406 second_word = offset_prel31 (second_word, offset);
15407
15408 bfd_put_32 (output_bfd, first_word, to);
15409 bfd_put_32 (output_bfd, second_word, to + 4);
15410 }
15411
15412 /* Data for make_branch_to_a8_stub(). */
15413
15414 struct a8_branch_to_stub_data
15415 {
15416 asection *writing_section;
15417 bfd_byte *contents;
15418 };
15419
15420
15421 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15422 places for a particular section. */
15423
15424 static bfd_boolean
15425 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15426 void *in_arg)
15427 {
15428 struct elf32_arm_stub_hash_entry *stub_entry;
15429 struct a8_branch_to_stub_data *data;
15430 bfd_byte *contents;
15431 unsigned long branch_insn;
15432 bfd_vma veneered_insn_loc, veneer_entry_loc;
15433 bfd_signed_vma branch_offset;
15434 bfd *abfd;
15435 unsigned int target;
15436
15437 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15438 data = (struct a8_branch_to_stub_data *) in_arg;
15439
15440 if (stub_entry->target_section != data->writing_section
15441 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15442 return TRUE;
15443
15444 contents = data->contents;
15445
15446 veneered_insn_loc = stub_entry->target_section->output_section->vma
15447 + stub_entry->target_section->output_offset
15448 + stub_entry->target_value;
15449
15450 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15451 + stub_entry->stub_sec->output_offset
15452 + stub_entry->stub_offset;
15453
15454 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15455 veneered_insn_loc &= ~3u;
15456
15457 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15458
15459 abfd = stub_entry->target_section->owner;
15460 target = stub_entry->target_value;
15461
15462 /* We attempt to avoid this condition by setting stubs_always_after_branch
15463 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15464 This check is just to be on the safe side... */
15465 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15466 {
15467 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15468 "allocated in unsafe location"), abfd);
15469 return FALSE;
15470 }
15471
15472 switch (stub_entry->stub_type)
15473 {
15474 case arm_stub_a8_veneer_b:
15475 case arm_stub_a8_veneer_b_cond:
15476 branch_insn = 0xf0009000;
15477 goto jump24;
15478
15479 case arm_stub_a8_veneer_blx:
15480 branch_insn = 0xf000e800;
15481 goto jump24;
15482
15483 case arm_stub_a8_veneer_bl:
15484 {
15485 unsigned int i1, j1, i2, j2, s;
15486
15487 branch_insn = 0xf000d000;
15488
15489 jump24:
15490 if (branch_offset < -16777216 || branch_offset > 16777214)
15491 {
15492 /* There's not much we can do apart from complain if this
15493 happens. */
15494 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15495 "of range (input file too large)"), abfd);
15496 return FALSE;
15497 }
15498
15499 /* i1 = not(j1 eor s), so:
15500 not i1 = j1 eor s
15501 j1 = (not i1) eor s. */
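/* The fields of the Thumb-2 B.W/BL/BLX encoding are packed below as:
imm11 in bits 0-10, J2 at bit 11, J1 at bit 13, imm10 in bits 16-25
and S at bit 26 of the combined 32-bit value.  */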
15502
15503 branch_insn |= (branch_offset >> 1) & 0x7ff;
15504 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15505 i2 = (branch_offset >> 22) & 1;
15506 i1 = (branch_offset >> 23) & 1;
15507 s = (branch_offset >> 24) & 1;
15508 j1 = (!i1) ^ s;
15509 j2 = (!i2) ^ s;
15510 branch_insn |= j2 << 11;
15511 branch_insn |= j1 << 13;
15512 branch_insn |= s << 26;
15513 }
15514 break;
15515
15516 default:
15517 BFD_FAIL ();
15518 return FALSE;
15519 }
15520
15521 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15522 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15523
15524 return TRUE;
15525 }
15526
15527 /* Do code byteswapping. Return FALSE afterwards so that the section is
15528 written out as normal. */
15529
15530 static bfd_boolean
15531 elf32_arm_write_section (bfd *output_bfd,
15532 struct bfd_link_info *link_info,
15533 asection *sec,
15534 bfd_byte *contents)
15535 {
15536 unsigned int mapcount, errcount;
15537 _arm_elf_section_data *arm_data;
15538 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15539 elf32_arm_section_map *map;
15540 elf32_vfp11_erratum_list *errnode;
15541 bfd_vma ptr;
15542 bfd_vma end;
15543 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15544 bfd_byte tmp;
15545 unsigned int i;
15546
15547 if (globals == NULL)
15548 return FALSE;
15549
15550 /* If this section has not been allocated an _arm_elf_section_data
15551 structure then we cannot record anything. */
15552 arm_data = get_arm_elf_section_data (sec);
15553 if (arm_data == NULL)
15554 return FALSE;
15555
15556 mapcount = arm_data->mapcount;
15557 map = arm_data->map;
15558 errcount = arm_data->erratumcount;
15559
15560 if (errcount != 0)
15561 {
15562 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15563
15564 for (errnode = arm_data->erratumlist; errnode != 0;
15565 errnode = errnode->next)
15566 {
15567 bfd_vma target = errnode->vma - offset;
15568
15569 switch (errnode->type)
15570 {
15571 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15572 {
15573 bfd_vma branch_to_veneer;
15574 /* Original condition code of instruction, plus bit mask for
15575 ARM B instruction. */
15576 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15577 | 0x0a000000;
15578
15579 /* The instruction is before the label. */
15580 target -= 4;
15581
15582 /* Above offset included in -4 below. */
15583 branch_to_veneer = errnode->u.b.veneer->vma
15584 - errnode->vma - 4;
15585
15586 if ((signed) branch_to_veneer < -(1 << 25)
15587 || (signed) branch_to_veneer >= (1 << 25))
15588 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15589 "range"), output_bfd);
15590
15591 insn |= (branch_to_veneer >> 2) & 0xffffff;
15592 contents[endianflip ^ target] = insn & 0xff;
15593 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15594 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15595 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15596 }
15597 break;
15598
15599 case VFP11_ERRATUM_ARM_VENEER:
15600 {
15601 bfd_vma branch_from_veneer;
15602 unsigned int insn;
15603
15604 /* Take size of veneer into account. */
15605 branch_from_veneer = errnode->u.v.branch->vma
15606 - errnode->vma - 12;
15607
15608 if ((signed) branch_from_veneer < -(1 << 25)
15609 || (signed) branch_from_veneer >= (1 << 25))
15610 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15611 "range"), output_bfd);
15612
15613 /* Original instruction. */
15614 insn = errnode->u.v.branch->u.b.vfp_insn;
15615 contents[endianflip ^ target] = insn & 0xff;
15616 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15617 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15618 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15619
15620 /* Branch back to insn after original insn. */
15621 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15622 contents[endianflip ^ (target + 4)] = insn & 0xff;
15623 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15624 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15625 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
15626 }
15627 break;
15628
15629 default:
15630 abort ();
15631 }
15632 }
15633 }
15634
15635 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15636 {
15637 arm_unwind_table_edit *edit_node
15638 = arm_data->u.exidx.unwind_edit_list;
15639 /* Now, sec->size is the size of the section we will write. The original
15640 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15641 markers) was sec->rawsize. (This isn't the case if we performed no
15642 edits; in that case rawsize will be zero and we should use size.) */
15643 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15644 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15645 unsigned int in_index, out_index;
15646 bfd_vma add_to_offsets = 0;
15647
15648 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15649 {
15650 if (edit_node)
15651 {
15652 unsigned int edit_index = edit_node->index;
15653
15654 if (in_index < edit_index && in_index * 8 < input_size)
15655 {
15656 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15657 contents + in_index * 8, add_to_offsets);
15658 out_index++;
15659 in_index++;
15660 }
15661 else if (in_index == edit_index
15662 || (in_index * 8 >= input_size
15663 && edit_index == UINT_MAX))
15664 {
15665 switch (edit_node->type)
15666 {
15667 case DELETE_EXIDX_ENTRY:
15668 in_index++;
15669 add_to_offsets += 8;
15670 break;
15671
15672 case INSERT_EXIDX_CANTUNWIND_AT_END:
15673 {
15674 asection *text_sec = edit_node->linked_section;
15675 bfd_vma text_offset = text_sec->output_section->vma
15676 + text_sec->output_offset
15677 + text_sec->size;
15678 bfd_vma exidx_offset = offset + out_index * 8;
15679 unsigned long prel31_offset;
15680
15681 /* Note: this is meant to be equivalent to an
15682 R_ARM_PREL31 relocation. These synthetic
15683 EXIDX_CANTUNWIND markers are not relocated by the
15684 usual BFD method. */
15685 prel31_offset = (text_offset - exidx_offset)
15686 & 0x7ffffffful;
15687
15688 /* First address we can't unwind. */
15689 bfd_put_32 (output_bfd, prel31_offset,
15690 &edited_contents[out_index * 8]);
15691
15692 /* Code for EXIDX_CANTUNWIND. */
15693 bfd_put_32 (output_bfd, 0x1,
15694 &edited_contents[out_index * 8 + 4]);
15695
15696 out_index++;
15697 add_to_offsets -= 8;
15698 }
15699 break;
15700 }
15701
15702 edit_node = edit_node->next;
15703 }
15704 }
15705 else
15706 {
15707 /* No more edits, copy remaining entries verbatim. */
15708 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15709 contents + in_index * 8, add_to_offsets);
15710 out_index++;
15711 in_index++;
15712 }
15713 }
15714
15715 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15716 bfd_set_section_contents (output_bfd, sec->output_section,
15717 edited_contents,
15718 (file_ptr) sec->output_offset, sec->size);
15719
15720 return TRUE;
15721 }
15722
15723 /* Fix code to point to Cortex-A8 erratum stubs. */
15724 if (globals->fix_cortex_a8)
15725 {
15726 struct a8_branch_to_stub_data data;
15727
15728 data.writing_section = sec;
15729 data.contents = contents;
15730
15731 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
15732 &data);
15733 }
15734
15735 if (mapcount == 0)
15736 return FALSE;
15737
15738 if (globals->byteswap_code)
15739 {
15740 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15741
15742 ptr = map[0].vma;
15743 for (i = 0; i < mapcount; i++)
15744 {
15745 if (i == mapcount - 1)
15746 end = sec->size;
15747 else
15748 end = map[i + 1].vma;
15749
15750 switch (map[i].type)
15751 {
15752 case 'a':
15753 /* Byte swap code words. */
15754 while (ptr + 3 < end)
15755 {
15756 tmp = contents[ptr];
15757 contents[ptr] = contents[ptr + 3];
15758 contents[ptr + 3] = tmp;
15759 tmp = contents[ptr + 1];
15760 contents[ptr + 1] = contents[ptr + 2];
15761 contents[ptr + 2] = tmp;
15762 ptr += 4;
15763 }
15764 break;
15765
15766 case 't':
15767 /* Byte swap code halfwords. */
15768 while (ptr + 1 < end)
15769 {
15770 tmp = contents[ptr];
15771 contents[ptr] = contents[ptr + 1];
15772 contents[ptr + 1] = tmp;
15773 ptr += 2;
15774 }
15775 break;
15776
15777 case 'd':
15778 /* Leave data alone. */
15779 break;
15780 }
15781 ptr = end;
15782 }
15783 }
15784
15785 free (map);
15786 arm_data->mapcount = -1;
15787 arm_data->mapsize = 0;
15788 arm_data->map = NULL;
15789
15790 return FALSE;
15791 }
15792
15793 /* Mangle thumb function symbols as we read them in. */
15794
15795 static bfd_boolean
15796 elf32_arm_swap_symbol_in (bfd * abfd,
15797 const void *psrc,
15798 const void *pshn,
15799 Elf_Internal_Sym *dst)
15800 {
15801 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15802 return FALSE;
15803
15804 /* New EABI objects mark thumb function symbols by setting the low bit of
15805 the address. */
15806 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15807 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15808 {
15809 if (dst->st_value & 1)
15810 {
15811 dst->st_value &= ~(bfd_vma) 1;
15812 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15813 }
15814 else
15815 dst->st_target_internal = ST_BRANCH_TO_ARM;
15816 }
15817 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15818 {
15819 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15820 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15821 }
15822 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15823 dst->st_target_internal = ST_BRANCH_LONG;
15824 else
15825 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15826
15827 return TRUE;
15828 }
15829
15830
15831 /* Mangle thumb function symbols as we write them out. */
15832
15833 static void
15834 elf32_arm_swap_symbol_out (bfd *abfd,
15835 const Elf_Internal_Sym *src,
15836 void *cdst,
15837 void *shndx)
15838 {
15839 Elf_Internal_Sym newsym;
15840
15841 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15842 of the address set, as per the new EABI. We do this unconditionally
15843 because objcopy does not set the elf header flags until after
15844 it writes out the symbol table. */
15845 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15846 {
15847 newsym = *src;
15848 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15849 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15850 if (newsym.st_shndx != SHN_UNDEF)
15851 {
15852 /* Do this only for defined symbols. At link time, the static
15853 linker simulates the dynamic linker's work of resolving
15854 symbols and carries over the Thumbness of the symbols it finds
15855 to the output symbol table. It is not clear how that happens,
15856 but the Thumbness of undefined symbols can well be different at
15857 run time, and writing '1' for them would be confusing for users
15858 and possibly for the dynamic linker itself.
15859 */
15860 newsym.st_value |= 1;
15861 }
15862
15863 src = &newsym;
15864 }
15865 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15866 }
15867
15868 /* Add the PT_ARM_EXIDX program header. */
15869
15870 static bfd_boolean
15871 elf32_arm_modify_segment_map (bfd *abfd,
15872 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15873 {
15874 struct elf_segment_map *m;
15875 asection *sec;
15876
15877 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15878 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15879 {
15880 /* If there is already a PT_ARM_EXIDX header, then we do not
15881 want to add another one. This situation arises when running
15882 "strip"; the input binary already has the header. */
15883 m = elf_seg_map (abfd);
15884 while (m && m->p_type != PT_ARM_EXIDX)
15885 m = m->next;
15886 if (!m)
15887 {
15888 m = (struct elf_segment_map *)
15889 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15890 if (m == NULL)
15891 return FALSE;
15892 m->p_type = PT_ARM_EXIDX;
15893 m->count = 1;
15894 m->sections[0] = sec;
15895
15896 m->next = elf_seg_map (abfd);
15897 elf_seg_map (abfd) = m;
15898 }
15899 }
15900
15901 return TRUE;
15902 }
15903
15904 /* We may add a PT_ARM_EXIDX program header. */
15905
15906 static int
15907 elf32_arm_additional_program_headers (bfd *abfd,
15908 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15909 {
15910 asection *sec;
15911
15912 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15913 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15914 return 1;
15915 else
15916 return 0;
15917 }
15918
15919 /* Hook called by the linker routine which adds symbols from an object
15920 file. */
15921
15922 static bfd_boolean
15923 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15924 Elf_Internal_Sym *sym, const char **namep,
15925 flagword *flagsp, asection **secp, bfd_vma *valp)
15926 {
15927 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15928 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
15929 && (abfd->flags & DYNAMIC) == 0
15930 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
15931 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15932
15933 if (elf32_arm_hash_table (info) == NULL)
15934 return FALSE;
15935
15936 if (elf32_arm_hash_table (info)->vxworks_p
15937 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15938 flagsp, secp, valp))
15939 return FALSE;
15940
15941 return TRUE;
15942 }
15943
15944 /* We use this to override swap_symbol_in and swap_symbol_out. */
15945 const struct elf_size_info elf32_arm_size_info =
15946 {
15947 sizeof (Elf32_External_Ehdr),
15948 sizeof (Elf32_External_Phdr),
15949 sizeof (Elf32_External_Shdr),
15950 sizeof (Elf32_External_Rel),
15951 sizeof (Elf32_External_Rela),
15952 sizeof (Elf32_External_Sym),
15953 sizeof (Elf32_External_Dyn),
15954 sizeof (Elf_External_Note),
15955 4,
15956 1,
15957 32, 2,
15958 ELFCLASS32, EV_CURRENT,
15959 bfd_elf32_write_out_phdrs,
15960 bfd_elf32_write_shdrs_and_ehdr,
15961 bfd_elf32_checksum_contents,
15962 bfd_elf32_write_relocs,
15963 elf32_arm_swap_symbol_in,
15964 elf32_arm_swap_symbol_out,
15965 bfd_elf32_slurp_reloc_table,
15966 bfd_elf32_slurp_symbol_table,
15967 bfd_elf32_swap_dyn_in,
15968 bfd_elf32_swap_dyn_out,
15969 bfd_elf32_swap_reloc_in,
15970 bfd_elf32_swap_reloc_out,
15971 bfd_elf32_swap_reloca_in,
15972 bfd_elf32_swap_reloca_out
15973 };
15974
15975 static bfd_vma
15976 read_code32 (const bfd *abfd, const bfd_byte *addr)
15977 {
15978 /* V7 BE8 code is always little endian. */
15979 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
15980 return bfd_getl32 (addr);
15981
15982 return bfd_get_32 (abfd, addr);
15983 }
15984
15985 static bfd_vma
15986 read_code16 (const bfd *abfd, const bfd_byte *addr)
15987 {
15988 /* V7 BE8 code is always little endian. */
15989 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
15990 return bfd_getl16 (addr);
15991
15992 return bfd_get_16 (abfd, addr);
15993 }
15994
15995 /* Return size of plt0 entry starting at ADDR
15996 or (bfd_vma) -1 if the size cannot be determined. */
15997
15998 static bfd_vma
15999 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
16000 {
16001 bfd_vma first_word;
16002 bfd_vma plt0_size;
16003
16004 first_word = read_code32 (abfd, addr);
16005
16006 if (first_word == elf32_arm_plt0_entry[0])
16007 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
16008 else if (first_word == elf32_thumb2_plt0_entry[0])
16009 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
16010 else
16011 /* We don't yet handle this PLT format. */
16012 return (bfd_vma) -1;
16013
16014 return plt0_size;
16015 }
16016
16017 /* Return size of plt entry starting at offset OFFSET
16018 of plt section located at address START
16019 or (bfd_vma) -1 if the size cannot be determined. */
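/* Note that the code below only recognises the PLT layouts generated
by this backend (optionally preceded by a Thumb stub); any other
layout makes it give up and return (bfd_vma) -1.  */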
16020
16021 static bfd_vma
16022 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
16023 {
16024 bfd_vma first_insn;
16025 bfd_vma plt_size = 0;
16026 const bfd_byte *addr = start + offset;
16027
16028 /* PLT entry size is fixed on Thumb-only platforms. */
16029 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
16030 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
16031
16032 /* Account for the leading Thumb stub, if present. */
16033 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
16034 {
16035 plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
16036 }
16037
16038 /* Strip immediate from first add. */
16039 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
16040
16041 #ifdef FOUR_WORD_PLT
16042 if (first_insn == elf32_arm_plt_entry[0])
16043 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
16044 #else
16045 if (first_insn == elf32_arm_plt_entry_long[0])
16046 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
16047 else if (first_insn == elf32_arm_plt_entry_short[0])
16048 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
16049 #endif
16050 else
16051 /* We don't yet handle this PLT format. */
16052 return (bfd_vma) -1;
16053
16054 return plt_size;
16055 }
16056
16057 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
16058
16059 static long
16060 elf32_arm_get_synthetic_symtab (bfd *abfd,
16061 long symcount ATTRIBUTE_UNUSED,
16062 asymbol **syms ATTRIBUTE_UNUSED,
16063 long dynsymcount,
16064 asymbol **dynsyms,
16065 asymbol **ret)
16066 {
16067 asection *relplt;
16068 asymbol *s;
16069 arelent *p;
16070 long count, i, n;
16071 size_t size;
16072 Elf_Internal_Shdr *hdr;
16073 char *names;
16074 asection *plt;
16075 bfd_vma offset;
16076 bfd_byte *data;
16077
16078 *ret = NULL;
16079
16080 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
16081 return 0;
16082
16083 if (dynsymcount <= 0)
16084 return 0;
16085
16086 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
16087 if (relplt == NULL)
16088 return 0;
16089
16090 hdr = &elf_section_data (relplt)->this_hdr;
16091 if (hdr->sh_link != elf_dynsymtab (abfd)
16092 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
16093 return 0;
16094
16095 plt = bfd_get_section_by_name (abfd, ".plt");
16096 if (plt == NULL)
16097 return 0;
16098
16099 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
16100 return -1;
16101
16102 data = plt->contents;
16103 if (data == NULL)
16104 {
16105 if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
16106 return -1;
16107 bfd_cache_section_contents ((asection *) plt, data);
16108 }
16109
16110 count = relplt->size / hdr->sh_entsize;
16111 size = count * sizeof (asymbol);
16112 p = relplt->relocation;
16113 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
16114 {
16115 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
16116 if (p->addend != 0)
16117 size += sizeof ("+0x") - 1 + 8;
16118 }
16119
16120 s = *ret = (asymbol *) bfd_malloc (size);
16121 if (s == NULL)
16122 return -1;
16123
16124 offset = elf32_arm_plt0_size (abfd, data);
16125 if (offset == (bfd_vma) -1)
16126 return -1;
16127
16128 names = (char *) (s + count);
16129 p = relplt->relocation;
16130 n = 0;
16131 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
16132 {
16133 size_t len;
16134
16135 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
16136 if (plt_size == (bfd_vma) -1)
16137 break;
16138
16139 *s = **p->sym_ptr_ptr;
16140 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
16141 we are defining a symbol, ensure one of them is set. */
16142 if ((s->flags & BSF_LOCAL) == 0)
16143 s->flags |= BSF_GLOBAL;
16144 s->flags |= BSF_SYNTHETIC;
16145 s->section = plt;
16146 s->value = offset;
16147 s->name = names;
16148 s->udata.p = NULL;
16149 len = strlen ((*p->sym_ptr_ptr)->name);
16150 memcpy (names, (*p->sym_ptr_ptr)->name, len);
16151 names += len;
16152 if (p->addend != 0)
16153 {
16154 char buf[30], *a;
16155
16156 memcpy (names, "+0x", sizeof ("+0x") - 1);
16157 names += sizeof ("+0x") - 1;
16158 bfd_sprintf_vma (abfd, buf, p->addend);
16159 for (a = buf; *a == '0'; ++a)
16160 ;
16161 len = strlen (a);
16162 memcpy (names, a, len);
16163 names += len;
16164 }
16165 memcpy (names, "@plt", sizeof ("@plt"));
16166 names += sizeof ("@plt");
16167 ++s, ++n;
16168 offset += plt_size;
16169 }
16170
16171 return n;
16172 }
16173
16174 #define ELF_ARCH bfd_arch_arm
16175 #define ELF_TARGET_ID ARM_ELF_DATA
16176 #define ELF_MACHINE_CODE EM_ARM
16177 #ifdef __QNXTARGET__
16178 #define ELF_MAXPAGESIZE 0x1000
16179 #else
16180 #define ELF_MAXPAGESIZE 0x10000
16181 #endif
16182 #define ELF_MINPAGESIZE 0x1000
16183 #define ELF_COMMONPAGESIZE 0x1000
16184
16185 #define bfd_elf32_mkobject elf32_arm_mkobject
16186
16187 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
16188 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
16189 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
16190 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
16191 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
16192 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
16193 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
16194 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
16195 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
16196 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
16197 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
16198 #define bfd_elf32_bfd_final_link elf32_arm_final_link
16199 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
16200
16201 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
16202 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
16203 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
16204 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
16205 #define elf_backend_check_relocs elf32_arm_check_relocs
16206 #define elf_backend_relocate_section elf32_arm_relocate_section
16207 #define elf_backend_write_section elf32_arm_write_section
16208 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
16209 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
16210 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
16211 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
16212 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
16213 #define elf_backend_always_size_sections elf32_arm_always_size_sections
16214 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
16215 #define elf_backend_post_process_headers elf32_arm_post_process_headers
16216 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
16217 #define elf_backend_object_p elf32_arm_object_p
16218 #define elf_backend_fake_sections elf32_arm_fake_sections
16219 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
16220 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16221 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
16222 #define elf_backend_size_info elf32_arm_size_info
16223 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
16224 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
16225 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
16226 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
16227 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
16228
16229 #define elf_backend_can_refcount 1
16230 #define elf_backend_can_gc_sections 1
16231 #define elf_backend_plt_readonly 1
16232 #define elf_backend_want_got_plt 1
16233 #define elf_backend_want_plt_sym 0
16234 #define elf_backend_may_use_rel_p 1
16235 #define elf_backend_may_use_rela_p 0
16236 #define elf_backend_default_use_rela_p 0
16237
16238 #define elf_backend_got_header_size 12
16239 #define elf_backend_extern_protected_data 1
16240
16241 #undef elf_backend_obj_attrs_vendor
16242 #define elf_backend_obj_attrs_vendor "aeabi"
16243 #undef elf_backend_obj_attrs_section
16244 #define elf_backend_obj_attrs_section ".ARM.attributes"
16245 #undef elf_backend_obj_attrs_arg_type
16246 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
16247 #undef elf_backend_obj_attrs_section_type
16248 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
16249 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
16250 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
16251
16252 #include "elf32-target.h"
16253
16254 /* Native Client targets. */
16255
16256 #undef TARGET_LITTLE_SYM
16257 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
16258 #undef TARGET_LITTLE_NAME
16259 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
16260 #undef TARGET_BIG_SYM
16261 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
16262 #undef TARGET_BIG_NAME
16263 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
16264
16265 /* Like elf32_arm_link_hash_table_create -- but overrides
16266 appropriately for NaCl. */
16267
16268 static struct bfd_link_hash_table *
16269 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
16270 {
16271 struct bfd_link_hash_table *ret;
16272
16273 ret = elf32_arm_link_hash_table_create (abfd);
16274 if (ret)
16275 {
16276 struct elf32_arm_link_hash_table *htab
16277 = (struct elf32_arm_link_hash_table *) ret;
16278
16279 htab->nacl_p = 1;
16280
16281 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
16282 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
16283 }
16284 return ret;
16285 }
16286
16287 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
16288 really need to use elf32_arm_modify_segment_map. But we do it
16289 anyway just to reduce gratuitous differences with the stock ARM backend. */
16290
16291 static bfd_boolean
16292 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
16293 {
16294 return (elf32_arm_modify_segment_map (abfd, info)
16295 && nacl_modify_segment_map (abfd, info));
16296 }
16297
16298 static void
16299 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
16300 {
16301 elf32_arm_final_write_processing (abfd, linker);
16302 nacl_final_write_processing (abfd, linker);
16303 }
16304
16305 static bfd_vma
16306 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
16307 const arelent *rel ATTRIBUTE_UNUSED)
16308 {
16309 return plt->vma
16310 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
16311 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
16312 }
16313
16314 #undef elf32_bed
16315 #define elf32_bed elf32_arm_nacl_bed
16316 #undef bfd_elf32_bfd_link_hash_table_create
16317 #define bfd_elf32_bfd_link_hash_table_create \
16318 elf32_arm_nacl_link_hash_table_create
16319 #undef elf_backend_plt_alignment
16320 #define elf_backend_plt_alignment 4
16321 #undef elf_backend_modify_segment_map
16322 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
16323 #undef elf_backend_modify_program_headers
16324 #define elf_backend_modify_program_headers nacl_modify_program_headers
16325 #undef elf_backend_final_write_processing
16326 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
16327 #undef bfd_elf32_get_synthetic_symtab
16328 #undef elf_backend_plt_sym_val
16329 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
16330
16331 #undef ELF_MINPAGESIZE
16332 #undef ELF_COMMONPAGESIZE
16333
16334
16335 #include "elf32-target.h"
16336
16337 /* Reset to defaults. */
16338 #undef elf_backend_plt_alignment
16339 #undef elf_backend_modify_segment_map
16340 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
16341 #undef elf_backend_modify_program_headers
16342 #undef elf_backend_final_write_processing
16343 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16344 #undef ELF_MINPAGESIZE
16345 #define ELF_MINPAGESIZE 0x1000
16346 #undef ELF_COMMONPAGESIZE
16347 #define ELF_COMMONPAGESIZE 0x1000
16348
16349
16350 /* VxWorks Targets. */
16351
16352 #undef TARGET_LITTLE_SYM
16353 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
16354 #undef TARGET_LITTLE_NAME
16355 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
16356 #undef TARGET_BIG_SYM
16357 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
16358 #undef TARGET_BIG_NAME
16359 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
16360
16361 /* Like elf32_arm_link_hash_table_create -- but overrides
16362 appropriately for VxWorks. */
16363
16364 static struct bfd_link_hash_table *
16365 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
16366 {
16367 struct bfd_link_hash_table *ret;
16368
16369 ret = elf32_arm_link_hash_table_create (abfd);
16370 if (ret)
16371 {
16372 struct elf32_arm_link_hash_table *htab
16373 = (struct elf32_arm_link_hash_table *) ret;
16374 htab->use_rel = 0;
16375 htab->vxworks_p = 1;
16376 }
16377 return ret;
16378 }
16379
16380 static void
16381 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
16382 {
16383 elf32_arm_final_write_processing (abfd, linker);
16384 elf_vxworks_final_write_processing (abfd, linker);
16385 }
16386
16387 #undef elf32_bed
16388 #define elf32_bed elf32_arm_vxworks_bed
16389
16390 #undef bfd_elf32_bfd_link_hash_table_create
16391 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
16392 #undef elf_backend_final_write_processing
16393 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
16394 #undef elf_backend_emit_relocs
16395 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
16396
16397 #undef elf_backend_may_use_rel_p
16398 #define elf_backend_may_use_rel_p 0
16399 #undef elf_backend_may_use_rela_p
16400 #define elf_backend_may_use_rela_p 1
16401 #undef elf_backend_default_use_rela_p
16402 #define elf_backend_default_use_rela_p 1
16403 #undef elf_backend_want_plt_sym
16404 #define elf_backend_want_plt_sym 1
16405 #undef ELF_MAXPAGESIZE
16406 #define ELF_MAXPAGESIZE 0x1000
16407
16408 #include "elf32-target.h"
16409
16410
16411 /* Merge backend specific data from an object file to the output
16412 object file when linking. */
16413
16414 static bfd_boolean
16415 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
16416 {
16417 flagword out_flags;
16418 flagword in_flags;
16419 bfd_boolean flags_compatible = TRUE;
16420 asection *sec;
16421
16422 /* Check if we have the same endianness. */
16423 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
16424 return FALSE;
16425
16426 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
16427 return TRUE;
16428
16429 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
16430 return FALSE;
16431
16432 /* The input BFD must have had its flags initialised. */
16433 /* The following seems bogus to me -- the flags are initialized in
16434 the assembler, but I don't think an elf_flags_init field is
16435 written into the object. */
16436 /* BFD_ASSERT (elf_flags_init (ibfd)); */
16437
16438 in_flags = elf_elfheader (ibfd)->e_flags;
16439 out_flags = elf_elfheader (obfd)->e_flags;
16440
16441 /* In theory there is no reason why we couldn't handle this. However,
16442 in practice it isn't even close to working and there is no real
16443 reason to want it. */
16444 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
16445 && !(ibfd->flags & DYNAMIC)
16446 && (in_flags & EF_ARM_BE8))
16447 {
16448 _bfd_error_handler (_("error: %B is already in final BE8 format"),
16449 ibfd);
16450 return FALSE;
16451 }
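/* Example, informational only: a relocatable input whose e_flags were
   0x05800000 -- EABI version 5 (0x05000000) plus EF_ARM_BE8
   (0x00800000) -- would be rejected here, because BE8 conversion is
   performed by the linker and must not already be present in an
   input object.  */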
16452
16453 if (!elf_flags_init (obfd))
16454 {
16455 /* If the input is the default architecture and has the default
16456 flags, then do not bother setting the flags for the output
16457 architecture; instead allow future merges to do this. If no
16458 future merges ever set these flags then they will retain their
16459 uninitialised values, which, unsurprisingly, correspond
16460 to the default values. */
16461 if (bfd_get_arch_info (ibfd)->the_default
16462 && elf_elfheader (ibfd)->e_flags == 0)
16463 return TRUE;
16464
16465 elf_flags_init (obfd) = TRUE;
16466 elf_elfheader (obfd)->e_flags = in_flags;
16467
16468 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
16469 && bfd_get_arch_info (obfd)->the_default)
16470 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
16471
16472 return TRUE;
16473 }
16474
16475 /* Determine what should happen if the input ARM architecture
16476 does not match the output ARM architecture. */
16477 if (! bfd_arm_merge_machines (ibfd, obfd))
16478 return FALSE;
16479
16480 /* Identical flags must be compatible. */
16481 if (in_flags == out_flags)
16482 return TRUE;
16483
16484 /* Check to see if the input BFD actually contains any sections. If
16485 not, its flags may not have been initialised either, but it
16486 cannot actually cause any incompatibility. Do not short-circuit
16487 dynamic objects; their section list may be emptied by
16488 elf_link_add_object_symbols.
16489
16490 Also check to see if there are no code sections in the input.
16491 In this case there is no need to check for code specific flags.
16492 XXX - do we need to worry about floating-point format compatibility
16493 in data sections? */
16494 if (!(ibfd->flags & DYNAMIC))
16495 {
16496 bfd_boolean null_input_bfd = TRUE;
16497 bfd_boolean only_data_sections = TRUE;
16498
16499 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
16500 {
16501 /* Ignore synthetic glue sections. */
16502 if (strcmp (sec->name, ".glue_7")
16503 && strcmp (sec->name, ".glue_7t"))
16504 {
16505 if ((bfd_get_section_flags (ibfd, sec)
16506 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
16507 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
16508 only_data_sections = FALSE;
16509
16510 null_input_bfd = FALSE;
16511 break;
16512 }
16513 }
16514
16515 if (null_input_bfd || only_data_sections)
16516 return TRUE;
16517 }
16518
16519 /* Complain about various flag mismatches. */
16520 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
16521 EF_ARM_EABI_VERSION (out_flags)))
16522 {
16523 _bfd_error_handler
16524 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
16525 ibfd, obfd,
16526 (in_flags & EF_ARM_EABIMASK) >> 24,
16527 (out_flags & EF_ARM_EABIMASK) >> 24);
16528 return FALSE;
16529 }
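/* Worked example, informational only: EF_ARM_EABIMASK is 0xFF000000,
   so the shifts above print the top byte of e_flags; an in_flags value
   of 0x05000002 would be reported as EABI version 5.  */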
16530
16531 /* Not sure what needs to be checked for EABI versions >= 1. */
16532 /* VxWorks libraries do not use these flags. */
16533 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
16534 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
16535 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
16536 {
16537 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
16538 {
16539 _bfd_error_handler
16540 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
16541 ibfd, obfd,
16542 in_flags & EF_ARM_APCS_26 ? 26 : 32,
16543 out_flags & EF_ARM_APCS_26 ? 26 : 32);
16544 flags_compatible = FALSE;
16545 }
16546
16547 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
16548 {
16549 if (in_flags & EF_ARM_APCS_FLOAT)
16550 _bfd_error_handler
16551 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
16552 ibfd, obfd);
16553 else
16554 _bfd_error_handler
16555 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
16556 ibfd, obfd);
16557
16558 flags_compatible = FALSE;
16559 }
16560
16561 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
16562 {
16563 if (in_flags & EF_ARM_VFP_FLOAT)
16564 _bfd_error_handler
16565 (_("error: %B uses VFP instructions, whereas %B does not"),
16566 ibfd, obfd);
16567 else
16568 _bfd_error_handler
16569 (_("error: %B uses FPA instructions, whereas %B does not"),
16570 ibfd, obfd);
16571
16572 flags_compatible = FALSE;
16573 }
16574
16575 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
16576 {
16577 if (in_flags & EF_ARM_MAVERICK_FLOAT)
16578 _bfd_error_handler
16579 (_("error: %B uses Maverick instructions, whereas %B does not"),
16580 ibfd, obfd);
16581 else
16582 _bfd_error_handler
16583 (_("error: %B does not use Maverick instructions, whereas %B does"),
16584 ibfd, obfd);
16585
16586 flags_compatible = FALSE;
16587 }
16588
16589 #ifdef EF_ARM_SOFT_FLOAT
16590 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
16591 {
16592 /* We can allow interworking between code that is VFP format
16593 layout, and uses either soft float or integer regs for
16594 passing floating point arguments and results. We already
16595 know that the APCS_FLOAT flags match; similarly for VFP
16596 flags. */
16597 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
16598 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
16599 {
16600 if (in_flags & EF_ARM_SOFT_FLOAT)
16601 _bfd_error_handler
16602 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16603 ibfd, obfd);
16604 else
16605 _bfd_error_handler
16606 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16607 ibfd, obfd);
16608
16609 flags_compatible = FALSE;
16610 }
16611 }
16612 #endif
16613
16614 /* Interworking mismatch is only a warning. */
16615 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16616 {
16617 if (in_flags & EF_ARM_INTERWORK)
16618 {
16619 _bfd_error_handler
16620 (_("Warning: %B supports interworking, whereas %B does not"),
16621 ibfd, obfd);
16622 }
16623 else
16624 {
16625 _bfd_error_handler
16626 (_("Warning: %B does not support interworking, whereas %B does"),
16627 ibfd, obfd);
16628 }
16629 }
16630 }
16631
16632 return flags_compatible;
16633 }
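/* Sketch, assuming the usual elf32-target.h convention rather than
   anything shown here: the routine above is the backend's
   bfd_merge_private_bfd_data hook, and would typically be registered
   with a define of the form

     #define bfd_elf32_bfd_merge_private_bfd_data \
	     elf32_arm_merge_private_bfd_data

   so that the linker invokes it once for each input BFD it merges.  */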
16634
16635
16636 /* Symbian OS Targets. */
16637
16638 #undef TARGET_LITTLE_SYM
16639 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
16640 #undef TARGET_LITTLE_NAME
16641 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16642 #undef TARGET_BIG_SYM
16643 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
16644 #undef TARGET_BIG_NAME
16645 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16646
16647 /* Like elf32_arm_link_hash_table_create -- but overrides
16648 appropriately for Symbian OS. */
16649
16650 static struct bfd_link_hash_table *
16651 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16652 {
16653 struct bfd_link_hash_table *ret;
16654
16655 ret = elf32_arm_link_hash_table_create (abfd);
16656 if (ret)
16657 {
16658 struct elf32_arm_link_hash_table *htab
16659 = (struct elf32_arm_link_hash_table *)ret;
16660 /* There is no PLT header for Symbian OS. */
16661 htab->plt_header_size = 0;
16662 /* The PLT entries are each one instruction and one word. */
16663 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16664 htab->symbian_p = 1;
16665 /* Symbian uses armv5t or above, so use_blx is always true. */
16666 htab->use_blx = 1;
16667 htab->root.is_relocatable_executable = 1;
16668 }
16669 return ret;
16670 }
16671
16672 static const struct bfd_elf_special_section
16673 elf32_arm_symbian_special_sections[] =
16674 {
16675 /* In a BPABI executable, the dynamic linking sections do not go in
16676 the loadable read-only segment. The post-linker may wish to
16677 refer to these sections, but they are not part of the final
16678 program image. */
16679 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16680 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16681 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16682 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16683 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16684 /* These sections do not need to be writable as the SymbianOS
16685 postlinker will arrange things so that no dynamic relocation is
16686 required. */
16687 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16688 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16689 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
16690 { NULL, 0, 0, 0, 0 }
16691 };
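/* Informational note: each entry above is a struct bfd_elf_special_section,
   laid out as { prefix, prefix_length, suffix_length, sh_type, sh_flags };
   STRING_COMMA_LEN supplies the first two fields.  The ".dynamic" entry,
   for instance, requests SHT_DYNAMIC with no SHF_ALLOC bit, keeping the
   section out of the loadable segments as described above.  */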
16692
16693 static void
16694 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16695 struct bfd_link_info *link_info)
16696 {
16697 /* BPABI objects are never loaded directly by an OS kernel; they are
16698 processed by a postlinker first, into an OS-specific format. If
16699 the D_PAGED bit is set on the file, BFD will align segments on
16700 page boundaries, so that an OS can directly map the file. With
16701 BPABI objects, that just results in wasted space. In addition,
16702 because we clear the D_PAGED bit, map_sections_to_segments will
16703 recognize that the program headers should not be mapped into any
16704 loadable segment. */
16705 abfd->flags &= ~D_PAGED;
16706 elf32_arm_begin_write_processing (abfd, link_info);
16707 }
16708
16709 static bfd_boolean
16710 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16711 struct bfd_link_info *info)
16712 {
16713 struct elf_segment_map *m;
16714 asection *dynsec;
16715
16716 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16717 segment. However, because the .dynamic section is not marked
16718 with SEC_LOAD, the generic ELF code will not create such a
16719 segment. */
16720 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
16721 if (dynsec)
16722 {
16723 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
16724 if (m->p_type == PT_DYNAMIC)
16725 break;
16726
16727 if (m == NULL)
16728 {
16729 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16730 m->next = elf_seg_map (abfd);
16731 elf_seg_map (abfd) = m;
16732 }
16733 }
16734
16735 /* Also call the generic arm routine. */
16736 return elf32_arm_modify_segment_map (abfd, info);
16737 }
16738
16739 /* Return address for Ith PLT stub in section PLT, for relocation REL
16740 or (bfd_vma) -1 if it should not be included. */
16741
16742 static bfd_vma
16743 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16744 const arelent *rel ATTRIBUTE_UNUSED)
16745 {
16746 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16747 }
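/* Worked example, informational only: per the comment in
   elf32_arm_symbian_link_hash_table_create, each Symbian PLT entry is
   one instruction plus one data word (two 32-bit words), so the return
   value reduces to plt->vma + 8 * i; with a PLT at 0x8000, slot 3
   would resolve to 0x8018.  */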
16748
16749
16750 #undef elf32_bed
16751 #define elf32_bed elf32_arm_symbian_bed
16752
16753 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16754 will process them and then discard them. */
16755 #undef ELF_DYNAMIC_SEC_FLAGS
16756 #define ELF_DYNAMIC_SEC_FLAGS \
16757 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16758
16759 #undef elf_backend_emit_relocs
16760
16761 #undef bfd_elf32_bfd_link_hash_table_create
16762 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16763 #undef elf_backend_special_sections
16764 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16765 #undef elf_backend_begin_write_processing
16766 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16767 #undef elf_backend_final_write_processing
16768 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16769
16770 #undef elf_backend_modify_segment_map
16771 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16772
16773 /* There is no .got section for BPABI objects, and hence no header. */
16774 #undef elf_backend_got_header_size
16775 #define elf_backend_got_header_size 0
16776
16777 /* Similarly, there is no .got.plt section. */
16778 #undef elf_backend_want_got_plt
16779 #define elf_backend_want_got_plt 0
16780
16781 #undef elf_backend_plt_sym_val
16782 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16783
16784 #undef elf_backend_may_use_rel_p
16785 #define elf_backend_may_use_rel_p 1
16786 #undef elf_backend_may_use_rela_p
16787 #define elf_backend_may_use_rela_p 0
16788 #undef elf_backend_default_use_rela_p
16789 #define elf_backend_default_use_rela_p 0
16790 #undef elf_backend_want_plt_sym
16791 #define elf_backend_want_plt_sym 0
16792 #undef ELF_MAXPAGESIZE
16793 #define ELF_MAXPAGESIZE 0x8000
16794
16795 #include "elf32-target.h"